Spaces (status: Sleeping)
Commit: "updates" — changed files:
- requirements.txt (+1 -0)
- run.py (+9 -1)
requirements.txt
CHANGED
@@ -27,3 +27,4 @@ notebook
 jupyter
 g4f
 langchain-community
+ollama
run.py
CHANGED
@@ -6,9 +6,17 @@ from server.backend import Backend_Api
 from server.babel import create_babel
 from json import load
 from flask import Flask
+import ollama
+
+# Optionally specify the model to pull during startup:
+model_name = "llama3" # Replace with the desired model name
 
-if __name__ == '__main__':
 
+if __name__ == '__main__':
+    import os
+    #os.system(" ollama serve")
+    # Start Ollama in server mode:
+    ollama.serve(pull=model_name)
     # Load configuration from config.json
     config = load(open('config.json', 'r'))
     site_config = config['site_config']