smgc committed on
Commit
d328605
1 Parent(s): 0951e8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -17
app.py CHANGED
@@ -3,27 +3,24 @@ from flask import Flask, request, Response
3
  import requests
4
  import json
5
  import os
 
6
  from helper import create_jwt
7
 
8
  app = Flask(__name__)
9
 
 
 
 
 
10
  @app.route('/ai/v1/chat/completions', methods=['POST'])
11
  async def chat():
12
  """
13
  Handle chat completion requests.
14
-
15
- This function processes incoming POST requests to the '/chat/completions' endpoint.
16
- It prepares the payload for the LLM API, generates a JWT for authentication,
17
- and streams the response from the LLM API back to the client.
18
-
19
- Returns:
20
- Response: A streaming response containing the LLM API's output.
21
-
22
- Note:
23
- - The function uses environment variables for proxy configuration.
24
- - It extracts GitHub username and Zed user ID from the Authorization header.
25
- - The LLM model defaults to "claude-3-5-sonnet-20240620" if not specified.
26
  """
 
 
 
 
27
  # Get the payload from the request
28
  payload = request.json
29
 
@@ -33,15 +30,18 @@ async def chat():
33
  # Extract GitHub username and Zed user ID from Authorization header
34
  auth_header = request.headers.get('Authorization')
35
  if not auth_header or not auth_header.startswith('Bearer '):
 
36
  return Response('Invalid Authorization header', status=401)
37
 
38
  try:
39
  github_username, zed_user_id = auth_header[7:].split(',')
 
40
  except ValueError:
 
41
  return Response('Invalid Authorization header format', status=401)
42
 
43
  # Prepare the request for the LLM API
44
- url = "https://llm.zed.dev/completion?"
45
 
46
  llm_payload = {
47
  "provider": "anthropic",
@@ -57,6 +57,7 @@ async def chat():
57
  }
58
 
59
  jwt = create_jwt(github_username, int(zed_user_id))
 
60
 
61
  headers = {
62
  'Host': 'llm.zed.dev',
@@ -71,10 +72,21 @@ async def chat():
71
  proxies = {'http': proxy, 'https': proxy} if proxy else None
72
 
73
  async def generate():
74
- with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies) as response:
75
- for chunk in response.iter_content(chunk_size=1024):
76
- if chunk:
77
- yield chunk
 
 
 
 
 
 
 
 
 
 
 
78
 
79
  return Response(generate(), content_type='application/octet-stream')
80
 
 
3
  import requests
4
  import json
5
  import os
6
+ import logging
7
  from helper import create_jwt
8
 
9
  app = Flask(__name__)
10
 
11
+ # 配置日志
12
+ logging.basicConfig(level=logging.INFO)
13
+ logger = logging.getLogger(__name__)
14
+
15
  @app.route('/ai/v1/chat/completions', methods=['POST'])
16
  async def chat():
17
  """
18
  Handle chat completion requests.
 
 
 
 
 
 
 
 
 
 
 
 
19
  """
20
+ # 记录请求信息
21
+ logger.info(f"Received request: {request.method} {request.url}")
22
+ logger.info(f"Headers: {request.headers}")
23
+
24
  # Get the payload from the request
25
  payload = request.json
26
 
 
30
  # Extract GitHub username and Zed user ID from Authorization header
31
  auth_header = request.headers.get('Authorization')
32
  if not auth_header or not auth_header.startswith('Bearer '):
33
+ logger.error("Invalid Authorization header")
34
  return Response('Invalid Authorization header', status=401)
35
 
36
  try:
37
  github_username, zed_user_id = auth_header[7:].split(',')
38
+ logger.info(f"GitHub username: {github_username}, Zed user ID: {zed_user_id}")
39
  except ValueError:
40
+ logger.error("Invalid Authorization header format")
41
  return Response('Invalid Authorization header format', status=401)
42
 
43
  # Prepare the request for the LLM API
44
+ url = "https://llm.zed.dev/completion"
45
 
46
  llm_payload = {
47
  "provider": "anthropic",
 
57
  }
58
 
59
  jwt = create_jwt(github_username, int(zed_user_id))
60
+ logger.info(f"Generated JWT token: {jwt}")
61
 
62
  headers = {
63
  'Host': 'llm.zed.dev',
 
72
  proxies = {'http': proxy, 'https': proxy} if proxy else None
73
 
74
  async def generate():
75
+ try:
76
+ with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies, allow_redirects=True) as response:
77
+ logger.info(f"LLM API response status: {response.status_code}")
78
+ logger.info(f"LLM API response headers: {response.headers}")
79
+
80
+ if response.status_code == 301:
81
+ logger.warning(f"Received 301 redirect. New location: {response.headers.get('Location')}")
82
+ # 如果需要,可以在这里处理重定向
83
+
84
+ for chunk in response.iter_content(chunk_size=1024):
85
+ if chunk:
86
+ yield chunk
87
+ except Exception as e:
88
+ logger.error(f"Error during API request: {str(e)}")
89
+ yield str(e).encode()
90
 
91
  return Response(generate(), content_type='application/octet-stream')
92