smgc committed
Commit b074ef9
Parent: dedefcd

Update app.py

Files changed (1)
  app.py +75 -48
app.py CHANGED
@@ -1,5 +1,5 @@
 from asgiref.wsgi import WsgiToAsgi
-from flask import Flask, request, Response
+from flask import Flask, request, Response, redirect
 import requests
 import json
 import random
@@ -53,63 +53,90 @@ def create_jwt(github_user_login: str, user_id: int) -> str:
     logger.info(f"Creating JWT for user: {github_user_login}")
     return jwt.encode(payload, 'llm-secret', algorithm='HS256')
 
+@app.before_request
+def before_request():
+    logger.info(f"Received request: {request.method} {request.url}")
+    logger.info(f"Request headers: {request.headers}")
+    if request.data:
+        logger.info(f"Request body: {request.get_data(as_text=True)}")
+
+    if not request.url.startswith('https') and not request.url.startswith('http://localhost'):
+        url = request.url.replace('http://', 'https://', 1)
+        code = 301
+        logger.info(f"Redirecting to HTTPS: {url}")
+        return redirect(url, code=code)
+
+@app.route('/')
+def root():
+    logger.info("Received request to root path")
+    return "Welcome to the chat completion API", 200
+
 @app.route('/chat/completions', methods=['POST'])
 async def chat():
     """
     Handle chat completion requests.
     """
-    logger.info("Received chat completion request")
-
-    # Get the payload from the request
-    payload = request.json
-    logger.info(f"Request payload: {payload}")
-
-    # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
-    model = payload.get('model', 'claude-3-5-sonnet-20240620')
-    logger.info(f"Using model: {model}")
-
-    # Prepare the request for the LLM API
-    url = "https://llm.zed.dev/completion?"
-
-    llm_payload = {
-        "provider": "anthropic",
-        "model": model,
-        "provider_request": {
-            "model": model,
-            "max_tokens": payload.get('max_tokens', 8192),
-            "temperature": payload.get('temperature', 0),
-            "top_p": payload.get('top_p', 0.7),
-            "messages": payload['messages'],
-            "system": ""
-        }
-    }
-
-    github_username, zed_user_id = get_github_username_zed_userid_list()[0]
-    jwt_token = create_jwt(github_username, zed_user_id)
-
-    headers = {
-        'Host': 'llm.zed.dev',
-        'accept': '*/*',
-        'content-type': 'application/json',
-        'authorization': f'Bearer {jwt_token}',
-        'user-agent': 'Zed/0.149.3 (macos; aarch64)'
-    }
-
-    # Get proxy from environment variable
-    proxy = os.environ.get('HTTP_PROXY', None)
-    proxies = {'http': proxy, 'https': proxy} if proxy else None
-    logger.info(f"Using proxy: {proxy}")
-
-    async def generate():
-        logger.info("Starting to stream response")
-        with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies) as response:
-            logger.info(f"LLM API response status: {response.status_code}")
-            for chunk in response.iter_content(chunk_size=1024):
-                if chunk:
-                    yield chunk
-        logger.info("Finished streaming response")
-
-    return Response(generate(), content_type='application/octet-stream')
+    logger.info("Processing chat completion request")
+
+    try:
+        # Get the payload from the request
+        payload = request.json
+        logger.info(f"Request payload: {payload}")
+
+        # Get the model from the payload, defaulting to "claude-3-5-sonnet-20240620"
+        model = payload.get('model', 'claude-3-5-sonnet-20240620')
+        logger.info(f"Using model: {model}")
+
+        # Prepare the request for the LLM API
+        url = "https://llm.zed.dev/completion?"
+
+        llm_payload = {
+            "provider": "anthropic",
+            "model": model,
+            "provider_request": {
+                "model": model,
+                "max_tokens": payload.get('max_tokens', 8192),
+                "temperature": payload.get('temperature', 0),
+                "top_p": payload.get('top_p', 0.7),
+                "messages": payload['messages'],
+                "system": ""
+            }
+        }
+
+        github_username, zed_user_id = get_github_username_zed_userid_list()[0]
+        jwt_token = create_jwt(github_username, zed_user_id)
+
+        headers = {
+            'Host': 'llm.zed.dev',
+            'accept': '*/*',
+            'content-type': 'application/json',
+            'authorization': f'Bearer {jwt_token}',
+            'user-agent': 'Zed/0.149.3 (macos; aarch64)'
+        }
+
+        # Get proxy from environment variable
+        proxy = os.environ.get('HTTP_PROXY', None)
+        proxies = {'http': proxy, 'https': proxy} if proxy else None
+        logger.info(f"Using proxy: {proxy}")
+
+        async def generate():
+            logger.info("Starting to stream response")
+            try:
+                with requests.post(url, headers=headers, json=llm_payload, stream=True, proxies=proxies) as response:
+                    logger.info(f"LLM API response status: {response.status_code}")
+                    for chunk in response.iter_content(chunk_size=1024):
+                        if chunk:
+                            yield chunk
+            except requests.RequestException as e:
+                logger.error(f"Error during LLM API request: {e}")
+                yield json.dumps({"error": "Internal server error"}).encode('utf-8')
+            logger.info("Finished streaming response")
+
+        return Response(generate(), content_type='application/octet-stream')
+
+    except Exception as e:
+        logger.error(f"Error processing request: {e}")
+        return json.dumps({"error": "Internal server error"}), 500, {'Content-Type': 'application/json'}
 
 # Convert the Flask app to an ASGI app
 asgi_app = WsgiToAsgi(app)
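
For quick verification, a smoke test against the routes this commit touches might look like the sketch below. It is illustrative only: the local base URL, port, and the `uvicorn app:asgi_app` launch command are assumptions not shown in this commit, while the JSON fields mirror exactly what `chat()` reads from the request payload.

# Hypothetical smoke test for the routes touched by this commit.
# Assumes the app is served locally, e.g.: uvicorn app:asgi_app --port 7860
# (the Space's real run command is not part of this diff).
import requests

BASE = "http://localhost:7860"  # assumption; localhost skips the HTTPS redirect

# Root route added in this commit: expect the welcome banner and HTTP 200.
print(requests.get(f"{BASE}/").text)

# Chat completion request; the keys mirror what chat() reads from the payload.
resp = requests.post(
    f"{BASE}/chat/completions",
    json={
        "model": "claude-3-5-sonnet-20240620",
        "max_tokens": 1024,
        "temperature": 0,
        "top_p": 0.7,
        "messages": [{"role": "user", "content": "Say hello."}],
    },
    stream=True,
)
for chunk in resp.iter_content(chunk_size=1024):
    if chunk:
        print(chunk.decode("utf-8", errors="replace"), end="")

Because the new before_request hook exempts http://localhost from the 301 redirect, this test reaches the routes directly instead of being bounced to HTTPS.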