smgc committed
Commit
3fb88cc
1 Parent(s): 88f8e77

Update app.py

Files changed (1)
  1. app.py +11 -7
app.py CHANGED
@@ -18,7 +18,7 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-@app.route('/ai/v1/chat/completions', methods=['POST'])
+@app.route('/chat/completions', methods=['POST'])
 async def chat():
     """
     Handle chat completion requests.
@@ -31,7 +31,7 @@ async def chat():
     logger.info(f"Request headers: {dict(request.headers)}")
 
     # Get the payload from the request
-    payload = request.json
+    payload = await request.get_json()
     logger.info(f"Request payload: {json.dumps(payload, indent=2)}")
 
     # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
@@ -86,10 +86,10 @@ async def chat():
     proxies = {'http': proxy, 'https': proxy} if proxy else None
     logger.info(f"Using proxy: {proxy}")
 
-    def generate():
+    async def generate():
        try:
            logger.info("Sending request to LLM API")
-           with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies, allow_redirects=True) as response:
+           async with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies, allow_redirects=True) as response:
                logger.info(f"LLM API response status: {response.status_code}")
                logger.info(f"LLM API response headers: {dict(response.headers)}")
 
@@ -98,7 +98,7 @@ async def chat():
                    logger.warning(f"Received 301 redirect. New location: {new_location}")
                    # Handle the redirect here if needed
 
-               for chunk in response.iter_content(chunk_size=1024):
+               async for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        logger.debug(f"Received chunk of size: {len(chunk)} bytes")
                        yield chunk
@@ -110,9 +110,13 @@ async def chat():
     return Response(generate(), content_type='application/octet-stream')
 
 @app.route('/', methods=['GET'])
-def home():
+async def home():
     return "Welcome to the Chat Completion API", 200
 
+# Create the ASGI application
+asgi_app = app.asgi_app
+
 if __name__ == '__main__':
+    import uvicorn
     logger.info("Starting the application")
-    app.run(host="0.0.0.0", port=8000)
+    uvicorn.run("app:asgi_app", host="0.0.0.0", port=8000, log_level="info")
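Note that the new async wrapper still calls the synchronous requests library: a requests.Response is not an async context manager, and iter_content() does not return an async iterator, so `async with requests.post(...)` and `async for chunk in response.iter_content(...)` will raise TypeError at runtime. Below is a minimal sketch, not part of the commit, of the same streaming proxy on top of httpx, which does support async streaming; url, headers, llm_payload, proxy, and logger are assumed to be the same names app.py already defines:

# Hedged sketch: async streaming with httpx instead of requests.
# Assumes url, headers, llm_payload, proxy, and logger exist in app.py.
import httpx

async def generate():
    # httpx >= 0.26 accepts a single proxy=...; older releases use proxies=...
    async with httpx.AsyncClient(proxy=proxy, follow_redirects=True) as client:
        async with client.stream("POST", url, headers=headers, json=llm_payload) as response:
            logger.info(f"LLM API response status: {response.status_code}")
            async for chunk in response.aiter_bytes():
                if chunk:
                    yield chunk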
 
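The `await request.get_json()` call and the `asgi_app` attribute suggest app is a Quart instance rather than Flask: Quart's request.get_json() is a coroutine, and Quart exposes asgi_app as the hook for wrapping ASGI middleware. Since a Quart app is itself an ASGI callable, uvicorn can serve either the app object or the exported attribute; a minimal sketch under that assumption:

# Hedged sketch, assuming app.py defines a Quart app named "app".
import uvicorn

if __name__ == '__main__':
    # A Quart instance is itself an ASGI callable, so this is equivalent
    # to serving the exported "app:asgi_app" attribute as the commit does.
    uvicorn.run("app:app", host="0.0.0.0", port=8000, log_level="info")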