# zed2api / app.py
import os

import httpx
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

from helper import create_jwt, generate_random_tuple
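
# `helper` is not included in this file view. A minimal sketch of the two
# functions it must provide, inferred purely from how they are called below
# (assumptions, not the project's actual implementation):
#
#   import random
#   import string
#   import time
#   import jwt  # PyJWT
#
#   def generate_random_tuple() -> tuple[str, int]:
#       """Return a random (github_username, user_id) pair."""
#       username = ''.join(random.choices(string.ascii_lowercase, k=8))
#       return username, random.randint(10_000, 99_999)
#
#   def create_jwt(github_username: str, user_id: int) -> str:
#       """Sign a short-lived HS256 token; the real claim names and secret
#       expected by llm.zed.dev are unknown here (placeholders only)."""
#       now = int(time.time())
#       claims = {"githubUserLogin": github_username, "userId": user_id,
#                 "iat": now, "exp": now + 3600}
#       return jwt.encode(claims, "PLACEHOLDER_SECRET", algorithm="HS256")
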
app = FastAPI()

@app.post('/ai/v1/chat/completions')
async def chat(request: Request):
    # Forge a random Zed identity and sign a JWT for the upstream API
    github_username, user_id = generate_random_tuple()
    jwt_token = create_jwt(github_username, user_id)

    # Parse the incoming OpenAI-style request body
    payload = await request.json()

    # Pick the model from the payload, defaulting to claude-3-5-sonnet-20240620
    model = payload.get('model', 'claude-3-5-sonnet-20240620')

    # Build the request for Zed's LLM completion endpoint
    url = "https://llm.zed.dev/completion?"
    llm_payload = {
        "provider": "anthropic",
        "model": model,
        "provider_request": {
            "model": model,
            "max_tokens": payload.get('max_tokens', 8192),
            "temperature": payload.get('temperature', 0),
            "top_p": payload.get('top_p', 0.7),
            "messages": payload['messages'],
            "system": ""
        }
    }

    # Mimic the headers the Zed editor itself sends
    headers = {
        'Host': 'llm.zed.dev',
        'accept': '*/*',
        'content-type': 'application/json',
        'authorization': f'Bearer {jwt_token}',  # the freshly signed JWT
        'user-agent': 'Zed/0.149.3 (macos; aarch64)'
    }

    # Optional outbound proxy from the environment. httpx expects a single
    # proxy URL, not the requests-style {'http': ..., 'https': ...} mapping.
    proxy = os.environ.get('HTTP_PROXY')
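
    # To route through a proxy, set the variable before starting the app,
    # e.g. (hypothetical address):
    #   HTTP_PROXY=http://127.0.0.1:7890 python app.py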
    async def generate():
        # Relay the upstream stream back to the caller, chunk by chunk.
        # `proxy=` requires httpx >= 0.26; older releases spell it `proxies=`.
        async with httpx.AsyncClient(proxy=proxy) as client:
            async with client.stream('POST', url, headers=headers, json=llm_payload) as response:
                async for chunk in response.aiter_bytes():
                    yield chunk

    return StreamingResponse(generate(), media_type='application/octet-stream')
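
# Example client call against the OpenAI-style route (illustrative values):
#   curl -N http://localhost:8000/ai/v1/chat/completions \
#     -H 'content-type: application/json' \
#     -d '{"model": "claude-3-5-sonnet-20240620", "messages": [{"role": "user", "content": "Hello"}]}'
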
if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)