smgc committed
Commit 306d21c
Parent: 9e063bf

Update app.py

Files changed (1)
  1. app.py +31 -4
app.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 from fastapi import FastAPI, Request
 from fastapi.responses import StreamingResponse
 import httpx
@@ -5,16 +6,24 @@ import json
 import os
 from helper import create_jwt, generate_random_tuple
 
+# Set up logging
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
 app = FastAPI()
 
 @app.post('/ai/v1/chat/completions')
 async def chat(request: Request):
+    logger.debug("Received request")
+
     # Generate JWT token
     github_username, user_id = generate_random_tuple()
     jwt_token = create_jwt(github_username, user_id)
+    logger.debug(f"Generated JWT token: {jwt_token}")
 
     # Get the payload from the request
     payload = await request.json()
+    logger.debug(f"Received payload: {payload}")
 
     # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
     model = payload.get('model', 'claude-3-5-sonnet-20240620')
@@ -34,27 +43,45 @@ async def chat(request: Request):
             "system": ""
         }
     }
+    logger.debug(f"LLM payload: {llm_payload}")
 
     headers = {
         'Host': 'llm.zed.dev',
         'accept': '*/*',
         'content-type': 'application/json',
-        'authorization': f'Bearer {jwt_token}',  # Use the generated JWT token
+        'authorization': f'Bearer {jwt_token}',
         'user-agent': 'Zed/0.149.3 (macos; aarch64)'
     }
+    logger.debug(f"Request headers: {headers}")
 
     # Get proxy from environment variable
     proxy = os.environ.get('HTTP_PROXY', None)
     proxies = {'http': proxy, 'https': proxy} if proxy else None
+    logger.debug(f"Using proxies: {proxies}")
 
     async def generate():
         async with httpx.AsyncClient(proxies=proxies) as client:
-            async with client.stream('POST', url, headers=headers, json=llm_payload) as response:
-                async for chunk in response.aiter_bytes():
-                    yield chunk
+            try:
+                async with client.stream('POST', url, headers=headers, json=llm_payload) as response:
+                    logger.debug(f"LLM API response status: {response.status_code}")
+                    logger.debug(f"LLM API response headers: {response.headers}")
+                    if response.status_code != 200:
+                        error_content = await response.read()
+                        logger.error(f"LLM API error response: {error_content}")
+                        yield f"Error: {response.status_code} - {error_content}"
+                    else:
+                        async for chunk in response.aiter_bytes():
+                            yield chunk
+            except Exception as e:
+                logger.error(f"Error during LLM API request: {str(e)}")
+                yield f"Error: {str(e)}"
 
     return StreamingResponse(generate(), media_type='application/octet-stream')
 
+@app.get("/")
+async def root():
+    return {"message": "Welcome to the AI Chat Completions API"}
+
 if __name__ == '__main__':
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)
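
A few notes on the code this commit touches. helper.py is not part of the diff, so create_jwt and generate_random_tuple are visible only as imports. A minimal sketch of what such helpers could look like, assuming PyJWT and an HS256 shared secret; the claim names, secret, and lifetime below are guesses, not the repo's actual code:

# Hypothetical sketch of helper.py -- the real module is not in this commit.
import random
import string
import time

import jwt  # PyJWT, an assumed dependency

SECRET = "not-the-real-secret"  # placeholder; the actual signing key is unknown

def generate_random_tuple() -> tuple[str, int]:
    # Return a random (github_username, user_id) pair.
    username = ''.join(random.choices(string.ascii_lowercase, k=8))
    user_id = random.randint(1, 10_000_000)
    return username, user_id

def create_jwt(github_username: str, user_id: int) -> str:
    # Sign a short-lived token; the claim names here are assumptions.
    now = int(time.time())
    payload = {
        "githubUserLogin": github_username,
        "userId": user_id,
        "iat": now,
        "exp": now + 3600,
    }
    return jwt.encode(payload, SECRET, algorithm="HS256")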
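
Note that logging.basicConfig(level=logging.DEBUG) means the freshly minted bearer token, the outgoing headers, and full payloads all land in the logs. If that is unwanted, one option is to gate the level on an environment variable; LOG_LEVEL below is a hypothetical name, not something this repo defines:

import logging
import os

# LOG_LEVEL is a hypothetical variable, not part of this commit
level_name = os.environ.get('LOG_LEVEL', 'INFO').upper()
logging.basicConfig(level=getattr(logging, level_name, logging.INFO))
logger = logging.getLogger(__name__)
logger.debug("only emitted when LOG_LEVEL=DEBUG")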
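
The proxy mapping uses requests-style keys, but httpx keys its proxies mapping by URL patterns such as 'http://' and 'https://', so {'http': proxy, 'https': proxy} is rejected whenever HTTP_PROXY is actually set; newer httpx releases also deprecate proxies= in favor of proxy=/mounts=. A corrected sketch:

import os

import httpx

proxy = os.environ.get('HTTP_PROXY')

# httpx expects URL-pattern keys, not requests-style 'http'/'https'
proxies = {'http://': proxy, 'https://': proxy} if proxy else None
client = httpx.AsyncClient(proxies=proxies)

# On httpx 0.26+, the single-proxy form is preferred:
# client = httpx.AsyncClient(proxy=proxy)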
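
In the new error branch, error_content = await response.read() is also suspect: Response.read() is the sync API, and on a response opened through AsyncClient.stream it raises instead of returning the body, so the except branch would mask the real upstream error. The async counterpart is await response.aread(). A self-contained sketch of the intended pattern:

import asyncio

import httpx

async def stream_or_report(url: str) -> None:
    # Streams the body on success; reads and reports it on error.
    async with httpx.AsyncClient() as client:
        async with client.stream('GET', url) as response:
            if response.status_code != 200:
                # aread() is the async counterpart of read()
                body = await response.aread()
                print(f"Error {response.status_code}: {body[:200]!r}")
            else:
                async for chunk in response.aiter_bytes():
                    print(f"received {len(chunk)} bytes")

if __name__ == '__main__':
    asyncio.run(stream_or_report('https://httpbin.org/status/500'))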
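
Finally, a minimal client for the endpoint this commit modifies. Only the 'model' field is visible in the diff; the 'messages' shape below is an assumption:

import asyncio

import httpx

async def main() -> None:
    payload = {
        "model": "claude-3-5-sonnet-20240620",
        # 'messages' is an assumed field; the diff only shows 'model'
        "messages": [{"role": "user", "content": "Hello"}],
    }
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            'POST', 'http://localhost:8000/ai/v1/chat/completions', json=payload
        ) as response:
            async for chunk in response.aiter_bytes():
                print(chunk.decode(errors='replace'), end='')

if __name__ == '__main__':
    asyncio.run(main())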