mistpe committed on
Commit
92a6e67
1 Parent(s): 338d1f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -2
app.py CHANGED
@@ -219,6 +219,76 @@ def process_function_call(function_name, function_args):
219
  def split_message(message, max_length=500):
220
  return [message[i:i+max_length] for i in range(0, len(message), max_length)]
221
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  @app.route('/api/wx', methods=['GET', 'POST'])
223
  def wechatai():
224
  if request.method == 'GET':
@@ -232,7 +302,7 @@ def wechatai():
232
  user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
233
 
234
  if from_user_name not in user_sessions:
235
- user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
236
 
237
  session = user_sessions[from_user_name]
238
 
@@ -275,6 +345,7 @@ def wechatai():
275
  "content": json.dumps(function_result, ensure_ascii=False)
276
  })
277
 
 
278
  final_response = get_openai_response(session['messages'], model=session['model'])
279
  response_content = final_response.content
280
  else:
@@ -289,7 +360,7 @@ def wechatai():
289
  response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
290
 
291
  return generate_response_xml(from_user_name, to_user_name, response_content)
292
-
293
  def list_available_models():
294
  return "\n".join([f"{key}: {value}" for key, value in AVAILABLE_MODELS.items()])
295
 
 
219
  def split_message(message, max_length=500):
220
  return [message[i:i+max_length] for i in range(0, len(message), max_length)]
221
 
222
+ # @app.route('/api/wx', methods=['GET', 'POST'])
223
+ # def wechatai():
224
+ # if request.method == 'GET':
225
+ # return verify_wechat(request)
226
+ # else:
227
+ # # 处理POST请求
228
+ # xml_str = request.data
229
+ # if not xml_str:
230
+ # return ""
231
+
232
+ # user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
233
+
234
+ # if from_user_name not in user_sessions:
235
+ # user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
236
+
237
+ # session = user_sessions[from_user_name]
238
+
239
+ # if user_message_content.lower() == '/models':
240
+ # response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
241
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
242
+ # elif user_message_content.lower().startswith('/model'):
243
+ # model = user_message_content.split(' ')[1]
244
+ # if model in AVAILABLE_MODELS:
245
+ # session['model'] = model
246
+ # response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
247
+ # else:
248
+ # response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
249
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
250
+ # elif user_message_content.lower() == '继续':
251
+ # if session['pending_response']:
252
+ # response_content = session['pending_response'].pop(0)
253
+ # if session['pending_response']:
254
+ # response_content += '\n\n回复"继续"获取下一部分。'
255
+ # else:
256
+ # response_content += '\n\n回复结束。'
257
+ # else:
258
+ # response_content = "没有待发送的消息。"
259
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
260
+
261
+ # session['messages'].append({"role": "user", "content": user_message_content})
262
+
263
+ # # 调用OpenAI API
264
+ # ai_response = get_openai_response(session['messages'], model=session['model'], functions=FUNCTIONS, function_call="auto")
265
+
266
+ # if ai_response.function_call:
267
+ # function_name = ai_response.function_call.name
268
+ # function_args = json.loads(ai_response.function_call.arguments)
269
+ # function_result = process_function_call(function_name, function_args)
270
+
271
+ # session['messages'].append(ai_response.model_dump())
272
+ # session['messages'].append({
273
+ # "role": "function",
274
+ # "name": function_name,
275
+ # "content": json.dumps(function_result, ensure_ascii=False)
276
+ # })
277
+
278
+ # final_response = get_openai_response(session['messages'], model=session['model'])
279
+ # response_content = final_response.content
280
+ # else:
281
+ # response_content = ai_response.content
282
+
283
+ # session['messages'].append({"role": "assistant", "content": response_content})
284
+
285
+ # # 处理长消息
286
+ # response_parts = split_message(response_content)
287
+ # if len(response_parts) > 1:
288
+ # session['pending_response'] = response_parts[1:]
289
+ # response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
290
+
291
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
292
  @app.route('/api/wx', methods=['GET', 'POST'])
293
  def wechatai():
294
  if request.method == 'GET':
 
302
  user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
303
 
304
  if from_user_name not in user_sessions:
305
+ user_sessions[from_user_name] = {'model': 'gpt-3.5-turbo', 'messages': [], 'pending_response': []}
306
 
307
  session = user_sessions[from_user_name]
308
 
 
345
  "content": json.dumps(function_result, ensure_ascii=False)
346
  })
347
 
348
+ # 再次调用OpenAI API,将函数执行结果作为上下文
349
  final_response = get_openai_response(session['messages'], model=session['model'])
350
  response_content = final_response.content
351
  else:
 
360
  response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
361
 
362
  return generate_response_xml(from_user_name, to_user_name, response_content)
363
+
364
  def list_available_models():
365
  return "\n".join([f"{key}: {value}" for key, value in AVAILABLE_MODELS.items()])
366