Update app.py
app.py CHANGED
@@ -13,6 +13,8 @@ logger = logging.getLogger(__name__)
 
 app = FastAPI()
 
+LLM_API_URL = "https://llm.zed.dev/completion?"
+
 @app.post('/ai/v1/chat/completions')
 async def chat(request: Request):
     logger.debug("Received request")
@@ -72,7 +74,7 @@ async def chat(request: Request):
     async def generate():
         async with httpx.AsyncClient(proxies=proxies) as client:
             try:
-                async with client.stream('POST',
+                async with client.stream('POST', LLM_API_URL, headers=headers, json=llm_payload) as response:
                     logger.debug(f"LLM API response status: {response.status_code}")
                     logger.debug(f"LLM API response headers: {response.headers}")
                     if response.status_code != 200:
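For context, here is a minimal sketch of how the changed lines fit into the full handler. The names `headers`, `llm_payload`, and `proxies` come from the diff itself; everything else (the auth-header forwarding, the `StreamingResponse` wiring, the error handling, and the `media_type`) is an assumption, not taken from this commit.

# Sketch of app.py around this change; hypothetical plumbing is marked as such.
import logging

import httpx
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

app = FastAPI()

LLM_API_URL = "https://llm.zed.dev/completion?"

@app.post('/ai/v1/chat/completions')
async def chat(request: Request):
    logger.debug("Received request")
    payload = await request.json()

    # Hypothetical plumbing (not in the diff): forward the caller's auth header
    # and body upstream; set an outbound proxy URL here if one is needed.
    headers = {"Authorization": request.headers.get("Authorization", "")}
    llm_payload = payload
    proxies = None

    async def generate():
        async with httpx.AsyncClient(proxies=proxies) as client:
            try:
                async with client.stream('POST', LLM_API_URL, headers=headers, json=llm_payload) as response:
                    logger.debug(f"LLM API response status: {response.status_code}")
                    logger.debug(f"LLM API response headers: {response.headers}")
                    if response.status_code != 200:
                        # Non-200: read the buffered error body and pass it through.
                        body = await response.aread()
                        yield body
                        return
                    # Relay the upstream stream chunk by chunk.
                    async for chunk in response.aiter_bytes():
                        yield chunk
            except httpx.HTTPError as exc:
                logger.error(f"Upstream request failed: {exc}")

    # media_type is an assumption; chat-completion endpoints typically stream SSE.
    return StreamingResponse(generate(), media_type="text/event-stream")

The point of the change is small but useful: hoisting the upstream endpoint into the module-level `LLM_API_URL` constant keeps it in one place instead of inline in the `client.stream(...)` call, so it can be swapped without touching the handler body.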