mistpe committed on
Commit
54fc02c
1 Parent(s): 7a1d4dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +111 -20
app.py CHANGED
@@ -289,12 +289,83 @@ def split_message(message, max_length=500):
289
  # response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
290
 
291
  # return generate_response_xml(from_user_name, to_user_name, response_content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
292
  @app.route('/api/wx', methods=['GET', 'POST'])
293
  def wechatai():
294
  if request.method == 'GET':
295
  return verify_wechat(request)
296
  else:
297
- # 处理POST请求
298
  xml_str = request.data
299
  if not xml_str:
300
  return ""
@@ -302,10 +373,11 @@ def wechatai():
302
  user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
303
 
304
  if from_user_name not in user_sessions:
305
- user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
306
 
307
  session = user_sessions[from_user_name]
308
 
 
309
  if user_message_content.lower() == '/models':
310
  response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
311
  return generate_response_xml(from_user_name, to_user_name, response_content)
@@ -327,29 +399,48 @@ def wechatai():
327
  else:
328
  response_content = "没有待发送的消息。"
329
  return generate_response_xml(from_user_name, to_user_name, response_content)
330
-
331
  session['messages'].append({"role": "user", "content": user_message_content})
 
 
 
 
 
 
 
332
 
333
- # 调用OpenAI API
334
- ai_response = get_openai_response(session['messages'], model=session['model'], functions=FUNCTIONS, function_call="auto")
335
 
336
- if ai_response.function_call:
337
- function_name = ai_response.function_call.name
338
- function_args = json.loads(ai_response.function_call.arguments)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
  function_result = process_function_call(function_name, function_args)
340
 
341
- session['messages'].append(ai_response.model_dump())
342
- session['messages'].append({
343
- "role": "function",
344
- "name": function_name,
345
- "content": json.dumps(function_result, ensure_ascii=False)
346
- })
347
-
348
- # 再次调用OpenAI API,将函数执行结果作为上下文
349
- final_response = get_openai_response(session['messages'], model=session['model'])
350
- response_content = final_response.content
351
- else:
352
- response_content = ai_response.content
353
 
354
  session['messages'].append({"role": "assistant", "content": response_content})
355
 
 
289
  # response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
290
 
291
  # return generate_response_xml(from_user_name, to_user_name, response_content)
292
+ # @app.route('/api/wx', methods=['GET', 'POST'])
293
+ # def wechatai():
294
+ # if request.method == 'GET':
295
+ # return verify_wechat(request)
296
+ # else:
297
+ # # 处理POST请求
298
+ # xml_str = request.data
299
+ # if not xml_str:
300
+ # return ""
301
+
302
+ # user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
303
+
304
+ # if from_user_name not in user_sessions:
305
+ # user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
306
+
307
+ # session = user_sessions[from_user_name]
308
+
309
+ # if user_message_content.lower() == '/models':
310
+ # response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
311
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
312
+ # elif user_message_content.lower().startswith('/model'):
313
+ # model = user_message_content.split(' ')[1]
314
+ # if model in AVAILABLE_MODELS:
315
+ # session['model'] = model
316
+ # response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
317
+ # else:
318
+ # response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
319
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
320
+ # elif user_message_content.lower() == '继续':
321
+ # if session['pending_response']:
322
+ # response_content = session['pending_response'].pop(0)
323
+ # if session['pending_response']:
324
+ # response_content += '\n\n回复"继续"获取下一部分。'
325
+ # else:
326
+ # response_content += '\n\n回复结束。'
327
+ # else:
328
+ # response_content = "没有待发送的消息。"
329
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
330
+
331
+ # session['messages'].append({"role": "user", "content": user_message_content})
332
+
333
+ # # 调用OpenAI API
334
+ # ai_response = get_openai_response(session['messages'], model=session['model'], functions=FUNCTIONS, function_call="auto")
335
+
336
+ # if ai_response.function_call:
337
+ # function_name = ai_response.function_call.name
338
+ # function_args = json.loads(ai_response.function_call.arguments)
339
+ # function_result = process_function_call(function_name, function_args)
340
+
341
+ # session['messages'].append(ai_response.model_dump())
342
+ # session['messages'].append({
343
+ # "role": "function",
344
+ # "name": function_name,
345
+ # "content": json.dumps(function_result, ensure_ascii=False)
346
+ # })
347
+
348
+ # # 再次调用OpenAI API,将函数执行结果作为上下文
349
+ # final_response = get_openai_response(session['messages'], model=session['model'])
350
+ # response_content = final_response.content
351
+ # else:
352
+ # response_content = ai_response.content
353
+
354
+ # session['messages'].append({"role": "assistant", "content": response_content})
355
+
356
+ # # 处理长消息
357
+ # response_parts = split_message(response_content)
358
+ # if len(response_parts) > 1:
359
+ # session['pending_response'] = response_parts[1:]
360
+ # response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
361
+
362
+ # return generate_response_xml(from_user_name, to_user_name, response_content)
363
+
364
  @app.route('/api/wx', methods=['GET', 'POST'])
365
  def wechatai():
366
  if request.method == 'GET':
367
  return verify_wechat(request)
368
  else:
 
369
  xml_str = request.data
370
  if not xml_str:
371
  return ""
 
373
  user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
374
 
375
  if from_user_name not in user_sessions:
376
+ user_sessions[from_user_name] = {'model': 'gpt-3.5-turbo', 'messages': [], 'pending_response': []}
377
 
378
  session = user_sessions[from_user_name]
379
 
380
+ # 处理特殊命令
381
  if user_message_content.lower() == '/models':
382
  response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
383
  return generate_response_xml(from_user_name, to_user_name, response_content)
 
399
  else:
400
  response_content = "没有待发送的消息。"
401
  return generate_response_xml(from_user_name, to_user_name, response_content)
402
+
403
  session['messages'].append({"role": "user", "content": user_message_content})
404
+ messages = session['messages']
405
+
406
+ # 次级模型1: 处理搜索相关函数
407
+ sub_model_1_response = get_openai_response(messages, model=session['model'], functions=FUNCTIONS_GROUP_1, function_call="auto")
408
+
409
+ # 次级模型2: 处理邮件发送相关函数
410
+ sub_model_2_response = get_openai_response(messages, model=session['model'], functions=FUNCTIONS_GROUP_2, function_call="auto")
411
 
412
+ function_call_1 = sub_model_1_response.function_call if sub_model_1_response and sub_model_1_response.function_call else None
413
+ function_call_2 = sub_model_2_response.function_call if sub_model_2_response and sub_model_2_response.function_call else None
414
 
415
+ final_function_call = None
416
+
417
+ if function_call_1 and function_call_2:
418
+ # 裁决模型: 决定使用哪个函数调用
419
+ arbitration_messages = messages + [
420
+ {"role": "system", "content": "两个次级模型都建议使用函数。请决定使用哪个函数更合适。"},
421
+ {"role": "assistant", "content": f"次级模型1建议使用函数:{function_call_1.name}"},
422
+ {"role": "assistant", "content": f"次级模型2建议使用函数:{function_call_2.name}"}
423
+ ]
424
+ arbitration_response = get_openai_response(arbitration_messages, model=session['model'])
425
+ if arbitration_response and ("模型1" in arbitration_response.content or function_call_1.name in arbitration_response.content):
426
+ final_function_call = function_call_1
427
+ else:
428
+ final_function_call = function_call_2
429
+ elif function_call_1:
430
+ final_function_call = function_call_1
431
+ elif function_call_2:
432
+ final_function_call = function_call_2
433
+
434
+ if final_function_call:
435
+ function_name = final_function_call.name
436
+ function_args = json.loads(final_function_call.arguments)
437
  function_result = process_function_call(function_name, function_args)
438
 
439
+ messages.append({"role": "function", "name": function_name, "content": json.dumps(function_result, ensure_ascii=False)})
440
+
441
+ # 主模型: 生成最终回复
442
+ final_response = get_openai_response(messages, model=session['model'])
443
+ response_content = final_response.content if final_response else "Error occurred"
 
 
 
 
 
 
 
444
 
445
  session['messages'].append({"role": "assistant", "content": response_content})
446