mistpe committed
Commit ca8ceea
Parent: 2492fd1

Create app-first.py

Files changed (1): app-first.py (+140, -0)
app-first.py ADDED
@@ -0,0 +1,140 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ from flask import Flask, request, make_response
+ import hashlib
+ import time
+ import xml.etree.ElementTree as ET
+ import os
+ from openai import OpenAI
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
+ app = Flask(__name__)
+
+ # Configuration
+ TOKEN = os.getenv('TOKEN')
+ API_KEY = os.getenv("API_KEY")
+ BASE_URL = os.getenv("OPENAI_BASE_URL")
+ client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
+
+ # List of available models
+ AVAILABLE_MODELS = {
+     'gpt-3.5-turbo': 'GPT-3.5 Turbo',
+     'gpt-4o': 'GPT-4o',
+     'gpt-4o-mini': 'GPT-4o-mini',
+ }
+
+ # Per-user session state
+ user_sessions = {}
+
+ def verify_wechat(request):
+     # Parameters sent by the WeChat server
+     data = request.args
+     signature = data.get('signature')
+     timestamp = data.get('timestamp')
+     nonce = data.get('nonce')
+     echostr = data.get('echostr')
+
+     # Sort the parameters lexicographically and concatenate them
+     temp = [timestamp, nonce, TOKEN]
+     temp.sort()
+     temp = ''.join(temp)
+
+     # Hash and compare against the signature
+     if (hashlib.sha1(temp.encode('utf8')).hexdigest() == signature):
+         return echostr
+     else:
+         return 'error', 403
+
+ def getUserMessageContentFromXML(xml_content):
+     # Parse the XML string
+     root = ET.fromstring(xml_content)
+     # Extract the fields
+     content = root.find('Content').text
+     from_user_name = root.find('FromUserName').text
+     to_user_name = root.find('ToUserName').text
+     return content, from_user_name, to_user_name
+
+ def generate_response_xml(from_user_name, to_user_name, output_content):
+     output_xml = '''
+     <xml>
+     <ToUserName><![CDATA[%s]]></ToUserName>
+     <FromUserName><![CDATA[%s]]></FromUserName>
+     <CreateTime>%s</CreateTime>
+     <MsgType><![CDATA[text]]></MsgType>
+     <Content><![CDATA[%s]]></Content>
+     </xml>'''
+
+     response = make_response(output_xml % (from_user_name, to_user_name, str(int(time.time())), output_content))
+     response.content_type = 'application/xml'
+     return response
+
+ def get_openai_response(messages, model="gpt-4o-mini"):
+     try:
+         response = client.chat.completions.create(
+             model=model,
+             messages=messages
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         print(f"调用OpenAI API时出错: {str(e)}")
+         return "抱歉,我遇到了一些问题,无法回答您的问题。"
+
+ def split_message(message, max_length=500):
+     return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+
+ def list_available_models():
+     return "\n".join([f"{key}: {value}" for key, value in AVAILABLE_MODELS.items()])
+
+ @app.route('/api/wx', methods=['GET', 'POST'])
+ def wechatai():
+     if request.method == 'GET':
+         return verify_wechat(request)
+     else:
+         # Handle the POST request
+         print("user request data: ", request.data)
+         user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(request.data)
+         print("user message content: ", user_message_content)
+
+         if user_message_content.lower() == '/models':
+             response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
+         elif user_message_content.lower().startswith('/model'):
+             model = user_message_content.split(' ')[1]
+             if model in AVAILABLE_MODELS:
+                 user_sessions[from_user_name] = {'model': model, 'messages': [], 'pending_response': []}
+                 response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
+             else:
+                 response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
+         elif user_message_content.lower() == '继续':
+             if from_user_name in user_sessions and user_sessions[from_user_name]['pending_response']:
+                 response_content = user_sessions[from_user_name]['pending_response'].pop(0)
+                 if user_sessions[from_user_name]['pending_response']:
+                     response_content += '\n\n回复"继续"获取下一部分。'
+                 else:
+                     response_content += '\n\n回复结束。'
+             else:
+                 response_content = "没有待发送的消息。"
+         else:
+             if from_user_name not in user_sessions:
+                 user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
+
+             session = user_sessions[from_user_name]
+             session['messages'].append({"role": "user", "content": user_message_content})
+
+             gpt_response = get_openai_response(session['messages'], session['model'])
+             session['messages'].append({"role": "assistant", "content": gpt_response})
+
+             response_parts = split_message(gpt_response)
+
+             if len(response_parts) > 1:
+                 response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
+                 session['pending_response'] = response_parts[1:]
+             else:
+                 response_content = response_parts[0]
+
+         return generate_response_xml(from_user_name, to_user_name, response_content)
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=7860, debug=True)
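
A quick way to exercise this handler before pointing the WeChat console at it is a local smoke test with Flask's test client. The sketch below is illustrative only: it assumes the file has been saved under an importable name such as app_first.py (a hyphenated filename cannot be imported directly) and that TOKEN, API_KEY, and OPENAI_BASE_URL are already set so the module loads; it rebuilds the signature exactly the way verify_wechat() checks it and uses the /models command so no live OpenAI call is made.

import hashlib
import time

from app_first import app, TOKEN  # hypothetical module name; rename app-first.py accordingly

def signed_params():
    # Reproduce verify_wechat()'s scheme: sort [timestamp, nonce, TOKEN], join, SHA-1.
    timestamp, nonce = str(int(time.time())), '12345'
    signature = hashlib.sha1(''.join(sorted([timestamp, nonce, TOKEN])).encode('utf8')).hexdigest()
    return {'signature': signature, 'timestamp': timestamp, 'nonce': nonce, 'echostr': 'hello'}

with app.test_client() as c:
    # GET: the server-verification handshake should echo echostr back.
    assert c.get('/api/wx', query_string=signed_params()).data == b'hello'

    # POST: the /models command is answered from AVAILABLE_MODELS without calling OpenAI.
    xml = (b'<xml>'
           b'<ToUserName><![CDATA[gh_account]]></ToUserName>'
           b'<FromUserName><![CDATA[test_openid]]></FromUserName>'
           b'<CreateTime>0</CreateTime>'
           b'<MsgType><![CDATA[text]]></MsgType>'
           b'<Content><![CDATA[/models]]></Content>'
           b'</xml>')
    print(c.post('/api/wx', data=xml).data.decode())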