Spaces:
Running
Running
gordonchan
committed on
Commit
•
020aaa1
1
Parent(s):
ca56e6a
Upload server.py
Browse files
server.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# server.py — FastAPI application entry point.
# Wires routers onto the shared `app` depending on which backends are
# enabled in SETTINGS, then serves the app under uvicorn when run directly.
#
# NOTE(review): leading indentation was lost in the diff this was recovered
# from; the engine if/elif chain is assumed to be nested under
# `GENERATE_ENGINE is not None` (consistent with the trailing
# chat/completion include_router calls) — confirm against upstream.
from api.config import SETTINGS
from api.models import app, EMBEDDED_MODEL, GENERATE_ENGINE


# Common URL prefix applied to every registered route (e.g. "/v1").
prefix = SETTINGS.api_prefix

if EMBEDDED_MODEL is not None:
    # An embedding model is loaded: expose the embedding endpoints.
    from api.routes.embedding import embedding_router

    app.include_router(embedding_router, prefix=prefix, tags=["Embedding"])


if GENERATE_ENGINE is not None:
    # A generation engine is loaded: expose model-info endpoints, then pick
    # the chat/completion routers for whichever backend SETTINGS selects.
    from api.routes import model_router

    app.include_router(model_router, prefix=prefix, tags=["Model"])

    if SETTINGS.engine == "vllm":
        from api.vllm_routes import chat_router as chat_router
        from api.vllm_routes import completion_router as completion_router

    elif SETTINGS.engine == "llama.cpp":
        from api.llama_cpp_routes import chat_router as chat_router
        from api.llama_cpp_routes import completion_router as completion_router

    elif SETTINGS.engine == "tgi":
        # NOTE: tgi's completion router lives in a submodule, unlike the others.
        from api.tgi_routes import chat_router as chat_router
        from api.tgi_routes.completion import completion_router as completion_router

    else:
        # Default transformers-based backend.
        from api.routes.chat import chat_router as chat_router
        from api.routes.completion import completion_router as completion_router

    app.include_router(chat_router, prefix=prefix, tags=["Chat Completion"])
    app.include_router(completion_router, prefix=prefix, tags=["Completion"])


if __name__ == '__main__':
    import uvicorn

    # Serve the configured app; bind address and port come from SETTINGS.
    uvicorn.run(app, host=SETTINGS.host, port=SETTINGS.port, log_level="info")