from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import httpx
import os

from helper import create_jwt, generate_random_tuple
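
# `helper` is a local module not included in this file. Based on how its
# functions are used below, its interface is assumed to be roughly the
# following (a hypothetical sketch, not the actual implementation):
#
#   def generate_random_tuple() -> tuple:
#       # Returns a random (github_username, zed_user_id) pair.
#
#   def create_jwt(github_username, zed_user_id) -> str:
#       # Returns a signed JWT accepted by llm.zed.dev that embeds those
#       # identifiers, e.g. built with PyJWT's
#       # jwt.encode(claims, key, algorithm="HS256").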

app = FastAPI()


@app.post('/ai/v1/chat/completions')
async def chat(request: Request):
"""
Handle chat completion requests.
This function processes incoming POST requests to the '/chat/completions' endpoint.
It prepares the payload for the LLM API, generates a JWT for authentication,
and streams the response from the LLM API back to the client.
Returns:
StreamingResponse: A streaming response containing the LLM API's output.
Note:
- The function uses environment variables for proxy configuration.
- It generates random GitHub username and Zed user ID for each request.
- The LLM model defaults to "claude-3-5-sonnet-20240620" if not specified.
"""
    # Parse the JSON payload from the incoming request
    payload = await request.json()

    # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
    model = payload.get('model', 'claude-3-5-sonnet-20240620')

    # Prepare the request for the Zed LLM API
    url = "https://llm.zed.dev/completion?"
    llm_payload = {
        "provider": "anthropic",
        "model": model,
        "provider_request": {
            "model": model,
            "max_tokens": payload.get('max_tokens', 8192),
            "temperature": payload.get('temperature', 0),
            "top_p": payload.get('top_p', 0.7),
            "messages": payload['messages'],
            "system": ""
        }
    }

    # Authenticate as a randomly generated Zed user
    github_username, zed_user_id = generate_random_tuple()
    jwt = create_jwt(github_username, zed_user_id)

    headers = {
        'Host': 'llm.zed.dev',
        'accept': '*/*',
        'content-type': 'application/json',
        'authorization': f'Bearer {jwt}',
        'user-agent': 'Zed/0.149.3 (macos; aarch64)'
    }

    # Route the request through a proxy if HTTP_PROXY is set. Note that
    # httpx's proxies mapping expects scheme-prefixed keys ('http://',
    # 'https://'), not requests-style 'http'/'https' keys.
    proxy = os.environ.get('HTTP_PROXY')
    proxies = {'http://': proxy, 'https://': proxy} if proxy else None

    async def generate():
        # Disable the default timeout so long-running streams are not cut
        # off, then relay the upstream response body chunk by chunk.
        async with httpx.AsyncClient(proxies=proxies, timeout=None) as client:
            async with client.stream('POST', url, headers=headers, json=llm_payload) as response:
                async for chunk in response.aiter_bytes():
                    yield chunk

    return StreamingResponse(generate(), media_type='application/octet-stream')


if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
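
# A minimal usage sketch (assumes the server is running locally on port 8000;
# the request body follows the OpenAI-style schema this endpoint reads):
#
#   import httpx
#
#   body = {
#       "model": "claude-3-5-sonnet-20240620",
#       "messages": [{"role": "user", "content": "Hello"}],
#   }
#   with httpx.stream("POST", "http://localhost:8000/ai/v1/chat/completions",
#                     json=body, timeout=None) as r:
#       for chunk in r.iter_bytes():
#           print(chunk.decode(errors="replace"), end="")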