smgc committed on
Commit
ae87889
1 Parent(s): d328605

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -6
app.py CHANGED
@@ -3,29 +3,41 @@ from flask import Flask, request, Response
3
  import requests
4
  import json
5
  import os
 
6
  import logging
7
  from helper import create_jwt
8
 
9
  app = Flask(__name__)
10
 
11
  # 配置日志
12
- logging.basicConfig(level=logging.INFO)
 
 
 
 
 
 
13
  logger = logging.getLogger(__name__)
14
 
15
- @app.route('/ai/v1/chat/completions', methods=['POST'])
16
  async def chat():
17
  """
18
  Handle chat completion requests.
19
  """
 
 
20
  # 记录请求信息
21
- logger.info(f"Received request: {request.method} {request.url}")
22
- logger.info(f"Headers: {request.headers}")
 
23
 
24
  # Get the payload from the request
25
  payload = request.json
 
26
 
27
  # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
28
  model = payload.get('model', 'claude-3-5-sonnet-20240620')
 
29
 
30
  # Extract GitHub username and Zed user ID from Authorization header
31
  auth_header = request.headers.get('Authorization')
@@ -42,6 +54,7 @@ async def chat():
42
 
43
  # Prepare the request for the LLM API
44
  url = "https://llm.zed.dev/completion"
 
45
 
46
  llm_payload = {
47
  "provider": "anthropic",
@@ -55,6 +68,7 @@ async def chat():
55
  "system": ""
56
  }
57
  }
 
58
 
59
  jwt = create_jwt(github_username, int(zed_user_id))
60
  logger.info(f"Generated JWT token: {jwt}")
@@ -66,28 +80,34 @@ async def chat():
66
  'authorization': f'Bearer {jwt}',
67
  'user-agent': 'Zed/0.149.3 (macos; aarch64)'
68
  }
 
69
 
70
  # Get proxy from environment variable
71
  proxy = os.environ.get('HTTP_PROXY', None)
72
  proxies = {'http': proxy, 'https': proxy} if proxy else None
 
73
 
74
  async def generate():
75
  try:
 
76
  with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies, allow_redirects=True) as response:
77
  logger.info(f"LLM API response status: {response.status_code}")
78
- logger.info(f"LLM API response headers: {response.headers}")
79
 
80
  if response.status_code == 301:
81
- logger.warning(f"Received 301 redirect. New location: {response.headers.get('Location')}")
 
82
  # 如果需要,可以在这里处理重定向
83
 
84
  for chunk in response.iter_content(chunk_size=1024):
85
  if chunk:
 
86
  yield chunk
87
  except Exception as e:
88
  logger.error(f"Error during API request: {str(e)}")
89
  yield str(e).encode()
90
 
 
91
  return Response(generate(), content_type='application/octet-stream')
92
 
93
  # Convert the Flask app to an ASGI app
@@ -95,4 +115,5 @@ asgi_app = WsgiToAsgi(app)
95
 
96
  if __name__ == '__main__':
97
  import uvicorn
 
98
  uvicorn.run(asgi_app, host="0.0.0.0", port=8000)
 
3
  import requests
4
  import json
5
  import os
6
+ import sys
7
  import logging
8
  from helper import create_jwt
9
 
10
  app = Flask(__name__)
11
 
12
  # 配置日志
13
+ logging.basicConfig(
14
+ level=logging.INFO,
15
+ format='%(asctime)s [%(levelname)s] %(message)s',
16
+ handlers=[
17
+ logging.StreamHandler(sys.stdout)
18
+ ]
19
+ )
20
  logger = logging.getLogger(__name__)
21
 
22
+ @app.route('/chat/completions', methods=['POST'])
23
  async def chat():
24
  """
25
  Handle chat completion requests.
26
  """
27
+ logger.info("Received chat completion request")
28
+
29
  # 记录请求信息
30
+ logger.info(f"Request method: {request.method}")
31
+ logger.info(f"Request URL: {request.url}")
32
+ logger.info(f"Request headers: {dict(request.headers)}")
33
 
34
  # Get the payload from the request
35
  payload = request.json
36
+ logger.info(f"Request payload: {json.dumps(payload, indent=2)}")
37
 
38
  # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
39
  model = payload.get('model', 'claude-3-5-sonnet-20240620')
40
+ logger.info(f"Using model: {model}")
41
 
42
  # Extract GitHub username and Zed user ID from Authorization header
43
  auth_header = request.headers.get('Authorization')
 
54
 
55
  # Prepare the request for the LLM API
56
  url = "https://llm.zed.dev/completion"
57
+ logger.info(f"LLM API URL: {url}")
58
 
59
  llm_payload = {
60
  "provider": "anthropic",
 
68
  "system": ""
69
  }
70
  }
71
+ logger.info(f"LLM API payload: {json.dumps(llm_payload, indent=2)}")
72
 
73
  jwt = create_jwt(github_username, int(zed_user_id))
74
  logger.info(f"Generated JWT token: {jwt}")
 
80
  'authorization': f'Bearer {jwt}',
81
  'user-agent': 'Zed/0.149.3 (macos; aarch64)'
82
  }
83
+ logger.info(f"Request headers: {json.dumps(headers, indent=2)}")
84
 
85
  # Get proxy from environment variable
86
  proxy = os.environ.get('HTTP_PROXY', None)
87
  proxies = {'http': proxy, 'https': proxy} if proxy else None
88
+ logger.info(f"Using proxy: {proxy}")
89
 
90
  async def generate():
91
  try:
92
+ logger.info("Sending request to LLM API")
93
  with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies, allow_redirects=True) as response:
94
  logger.info(f"LLM API response status: {response.status_code}")
95
+ logger.info(f"LLM API response headers: {dict(response.headers)}")
96
 
97
  if response.status_code == 301:
98
+ new_location = response.headers.get('Location')
99
+ logger.warning(f"Received 301 redirect. New location: {new_location}")
100
  # 如果需要,可以在这里处理重定向
101
 
102
  for chunk in response.iter_content(chunk_size=1024):
103
  if chunk:
104
+ logger.debug(f"Received chunk of size: {len(chunk)} bytes")
105
  yield chunk
106
  except Exception as e:
107
  logger.error(f"Error during API request: {str(e)}")
108
  yield str(e).encode()
109
 
110
+ logger.info("Returning streaming response")
111
  return Response(generate(), content_type='application/octet-stream')
112
 
113
  # Convert the Flask app to an ASGI app
 
115
 
116
  if __name__ == '__main__':
117
  import uvicorn
118
+ logger.info("Starting the application")
119
  uvicorn.run(asgi_app, host="0.0.0.0", port=8000)