Update app.py
app.py CHANGED

@@ -1,4 +1,4 @@
-from fastapi import FastAPI, Request
+from fastapi import FastAPI, Request
 from fastapi.responses import StreamingResponse
 import httpx
 import json
@@ -13,7 +13,7 @@ app = FastAPI()
 async def chat(request: Request):
     """
     Handle chat completion requests.
-    This function processes incoming POST requests to the '/chat/completions' endpoint.
+    This function processes incoming POST requests to the '/ai/v1/chat/completions' endpoint.
     It prepares the payload for the LLM API, generates a JWT for authentication,
     and streams the response from the LLM API back to the client.
     Returns:
@@ -68,6 +68,10 @@ async def chat(request: Request):
 
     return StreamingResponse(generate(), media_type='application/octet-stream')
 
+@app.get("/")
+async def root():
+    return {"message": "Welcome to the AI Chat Completions API"}
+
 if __name__ == '__main__':
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)
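The diff shows only the docstring change and the new root route; the body of chat() (original lines 20-67) is elided. For orientation, here is a minimal sketch of how such a handler typically fits together, assuming PyJWT for token generation and httpx for the upstream call. LLM_API_URL, API_KEY, API_SECRET, and generate_jwt are illustrative names, not the file's actual code.

# Minimal sketch, not the repository's actual code: the diff elides the body
# of chat() (original lines 20-67). Names marked "assumed" are illustrative.
import time

import httpx
import jwt  # PyJWT -- assumed; the diff only says a JWT is generated
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

app = FastAPI()

LLM_API_URL = "https://llm.example.com/chat/completions"  # assumed upstream URL
API_KEY = "your-api-key"        # assumed credential layout
API_SECRET = "your-api-secret"  # assumed credential layout


def generate_jwt(key: str, secret: str) -> str:
    """Build a short-lived HS256 token; the real claim set depends on the upstream API."""
    now = int(time.time())
    return jwt.encode({"api_key": key, "iat": now, "exp": now + 3600},
                      secret, algorithm="HS256")


@app.post("/ai/v1/chat/completions")
async def chat(request: Request):
    payload = await request.json()  # forward the client's JSON body as-is
    headers = {"Authorization": f"Bearer {generate_jwt(API_KEY, API_SECRET)}"}

    async def generate():
        # The httpx client must stay open for the lifetime of the stream,
        # so both context managers live inside the generator.
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream("POST", LLM_API_URL,
                                     json=payload, headers=headers) as resp:
                async for chunk in resp.aiter_bytes():
                    yield chunk

    return StreamingResponse(generate(), media_type='application/octet-stream')


@app.get("/")
async def root():
    return {"message": "Welcome to the AI Chat Completions API"}


if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)

Assuming the sketch above, the streaming path can be exercised with curl's no-buffer flag:

curl -N http://localhost:8000/ai/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{"messages": [{"role": "user", "content": "Hello"}]}'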