mistpe committed
Commit 8008fbb
1 Parent(s): ca8ceea

Update app.py

Files changed (1): app.py (+191, -34)
app.py CHANGED
@@ -5,8 +5,14 @@ import hashlib
  import time
  import xml.etree.ElementTree as ET
  import os
+ import json
  from openai import OpenAI
  from dotenv import load_dotenv
+ from duckduckgo_search import DDGS
+ import requests
+ import smtplib
+ from email.mime.text import MIMEText
+ from email.mime.multipart import MIMEMultipart
 
  # Load environment variables
  load_dotenv()
@@ -14,21 +20,118 @@ load_dotenv()
  app = Flask(__name__)
 
  # Configuration
- TOKEN = os.getenv('TOKEN')
- API_KEY = os.getenv("API_KEY")
+ TOKEN = os.getenv('WECHAT_TOKEN')
+ API_KEY = os.getenv("OPENAI_API_KEY")
  BASE_URL = os.getenv("OPENAI_BASE_URL")
+ emailkey = os.getenv("EMAIL_KEY")
  client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
 
  # Define the list of available models
  AVAILABLE_MODELS = {
      'gpt-3.5-turbo': 'GPT-3.5 Turbo',
-     'gpt-4o': 'GPT-4o',
-     'gpt-4o-mini': 'GPT-4o-mini',
+     'gpt-4': 'GPT-4',
+     'gpt-4-turbo': 'GPT-4 Turbo',
  }
 
  # Store user session information
  user_sessions = {}
 
+ # Define functions
+ def search_duckduckgo(keywords):
+     search_term = " ".join(keywords)
+     with DDGS() as ddgs:
+         results = list(ddgs.text(keywords=search_term, region="cn-zh", safesearch="on", max_results=5))
+     return [{"title": result['title'], "body": result['body'].replace('\n', ' ')} for result in results]
+
+ def search_papers(query):
+     url = f"https://api.crossref.org/works?query={query}"
+     response = requests.get(url)
+     if response.status_code == 200:
+         data = response.json()
+         papers = data['message']['items']
+         processed_papers = []
+         for paper in papers:
+             processed_paper = {
+                 "标题": paper.get('title', [''])[0],
+                 "作者": ", ".join([f"{author.get('given', '')} {author.get('family', '')}" for author in paper.get('author', [])]),
+                 "DOI": paper.get('DOI', ''),
+                 "摘要": paper.get('abstract', '').replace('<p>', '').replace('</p>', '').replace('<italic>', '*').replace('</italic>', '*')
+             }
+             processed_papers.append(processed_paper)
+         return processed_papers
+     else:
+         return []
+
+ def send_email(to, subject, content):
+     try:
+         with smtplib.SMTP('106.15.184.28', 8025) as smtp:
+             smtp.login("jwt", emailkey)
+             message = MIMEMultipart()
+             message['From'] = "Me <[email protected]>"
+             message['To'] = to
+             message['Subject'] = subject
+             message.attach(MIMEText(content, 'html'))
+             smtp.sendmail("[email protected]", to, message.as_string())
+         return True
+     except Exception as e:
+         print(f"发送邮件时出错: {str(e)}")
+         return False
+
+ # Define the function list
+ FUNCTIONS = [
+     {
+         "name": "search_duckduckgo",
+         "description": "使用DuckDuckGo搜索引擎查询信息。可以搜索最新新闻、文章、博客等内容。",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "keywords": {
+                     "type": "array",
+                     "items": {"type": "string"},
+                     "description": "搜索的关键词列表。例如:['Python', '机器学习', '最新进展']。"
+                 }
+             },
+             "required": ["keywords"]
+         }
+     },
+     {
+         "name": "search_papers",
+         "description": "使用Crossref API搜索学术论文。",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "query": {
+                     "type": "string",
+                     "description": "搜索查询字符串。例如:'climate change'。"
+                 }
+             },
+             "required": ["query"]
+         }
+     },
+     {
+         "name": "send_email",
+         "description": "发送电子邮件。",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "to": {
+                     "type": "string",
+                     "description": "收件人邮箱地址"
+                 },
+                 "subject": {
+                     "type": "string",
+                     "description": "邮件主题"
+                 },
+                 "content": {
+                     "type": "string",
+                     "description": "邮件内容"
+                 }
+             },
+             "required": ["to", "subject", "content"]
+         }
+     }
+ ]
+
  def verify_wechat(request):
      # Get the parameters sent by the WeChat server
      data = request.args
@@ -71,70 +174,124 @@ def generate_response_xml(from_user_name, to_user_name, output_content):
      response.content_type = 'application/xml'
      return response
 
- def get_openai_response(messages, model="gpt-4o-mini"):
+ def get_openai_response(messages, model="gpt-3.5-turbo", functions=None, function_call=None):
      try:
          response = client.chat.completions.create(
              model=model,
-             messages=messages
+             messages=messages,
+             functions=functions,
+             function_call=function_call
          )
-         return response.choices[0].message.content
+         return response.choices[0].message
      except Exception as e:
          print(f"调用OpenAI API时出错: {str(e)}")
-         return "抱歉,我遇到了一些问题,无法回答您的问题。"
+         return None
+
+ def process_function_call(function_name, function_args):
+     if function_name == "search_duckduckgo":
+         keywords = function_args.get('keywords', [])
+         if not keywords:
+             return "搜索关键词为空,无法执行搜索。"
+         return search_duckduckgo(keywords)
+     elif function_name == "search_papers":
+         query = function_args.get('query', '')
+         if not query:
+             return "搜索查询为空,无法执行论文搜索。"
+         return search_papers(query)
+     elif function_name == "send_email":
+         to = function_args.get('to', '')
+         subject = function_args.get('subject', '')
+         content = function_args.get('content', '')
+         if not to or not subject or not content:
+             return "邮件信息不完整,无法发送邮件。"
+         success = send_email(to, subject, content)
+         return {
+             "success": success,
+             "message": "邮件发送成功" if success else "邮件发送失败",
+             "to": to,
+             "subject": subject,
+             "content": content,
+             "is_email": True
+         }
+     else:
+         return "未知的函数调用。"
 
  def split_message(message, max_length=500):
      return [message[i:i+max_length] for i in range(0, len(message), max_length)]
 
- def list_available_models():
-     return "\n".join([f"{key}: {value}" for key, value in AVAILABLE_MODELS.items()])
-
  @app.route('/api/wx', methods=['GET', 'POST'])
  def wechatai():
      if request.method == 'GET':
          return verify_wechat(request)
      else:
          # Handle the POST request
-         print("user request data: ", request.data)
-         user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(request.data)
-         print("user message content: ", user_message_content)
+         xml_str = request.data
+         if not xml_str:
+             return ""
+
+         user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(xml_str)
+
+         if from_user_name not in user_sessions:
+             user_sessions[from_user_name] = {'model': 'gpt-3.5-turbo', 'messages': [], 'pending_response': []}
 
+         session = user_sessions[from_user_name]
+
          if user_message_content.lower() == '/models':
              response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
+             return generate_response_xml(from_user_name, to_user_name, response_content)
          elif user_message_content.lower().startswith('/model'):
              model = user_message_content.split(' ')[1]
              if model in AVAILABLE_MODELS:
-                 user_sessions[from_user_name] = {'model': model, 'messages': [], 'pending_response': []}
+                 session['model'] = model
                  response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
              else:
                  response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
+             return generate_response_xml(from_user_name, to_user_name, response_content)
          elif user_message_content.lower() == '继续':
-             if from_user_name in user_sessions and user_sessions[from_user_name]['pending_response']:
-                 response_content = user_sessions[from_user_name]['pending_response'].pop(0)
-                 if user_sessions[from_user_name]['pending_response']:
+             if session['pending_response']:
+                 response_content = session['pending_response'].pop(0)
+                 if session['pending_response']:
                      response_content += '\n\n回复"继续"获取下一部分。'
                  else:
                      response_content += '\n\n回复结束。'
             else:
                 response_content = "没有待发送的消息。"
-         else:
-             if from_user_name not in user_sessions:
-                 user_sessions[from_user_name] = {'model': 'gpt-4o-mini', 'messages': [], 'pending_response': []}
-
-             session = user_sessions[from_user_name]
-             session['messages'].append({"role": "user", "content": user_message_content})
+             return generate_response_xml(from_user_name, to_user_name, response_content)
+
+         session['messages'].append({"role": "user", "content": user_message_content})
+
+         # Call the OpenAI API
+         ai_response = get_openai_response(session['messages'], model=session['model'], functions=FUNCTIONS, function_call="auto")
+
+         if ai_response.function_call:
+             function_name = ai_response.function_call.name
+             function_args = json.loads(ai_response.function_call.arguments)
+             function_result = process_function_call(function_name, function_args)
 
-             gpt_response = get_openai_response(session['messages'], session['model'])
-             session['messages'].append({"role": "assistant", "content": gpt_response})
-
-             response_parts = split_message(gpt_response)
+             session['messages'].append(ai_response.model_dump())
+             session['messages'].append({
+                 "role": "function",
+                 "name": function_name,
+                 "content": json.dumps(function_result, ensure_ascii=False)
+             })
 
-             if len(response_parts) > 1:
-                 response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
-                 session['pending_response'] = response_parts[1:]
-             else:
-                 response_content = response_parts[0]
-
+             final_response = get_openai_response(session['messages'], model=session['model'])
+             response_content = final_response.content
+         else:
+             response_content = ai_response.content
+
+         session['messages'].append({"role": "assistant", "content": response_content})
+
+         # Handle long messages
+         response_parts = split_message(response_content)
+         if len(response_parts) > 1:
+             session['pending_response'] = response_parts[1:]
+             response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
+
          return generate_response_xml(from_user_name, to_user_name, response_content)
 
+ def list_available_models():
+     return "\n".join([f"{key}: {value}" for key, value in AVAILABLE_MODELS.items()])
+
  if __name__ == '__main__':
      app.run(host='0.0.0.0', port=7860, debug=True)
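
The updated app.py now reads four environment variables (WECHAT_TOKEN, OPENAI_API_KEY, OPENAI_BASE_URL, EMAIL_KEY). Below is a minimal, hypothetical check script, not part of this commit; the file name check_env.py and the description strings are illustrative only. It mirrors the load_dotenv() call in app.py and verifies that every variable the diff references is set before the Flask app is started.

# check_env.py -- hypothetical helper, not included in this commit.
# Confirms the environment variables that the updated app.py reads via os.getenv().
import os
from dotenv import load_dotenv

# Mirror app.py: pull variables from a local .env file if one exists.
load_dotenv()

REQUIRED_VARS = {
    "WECHAT_TOKEN": "assigned to TOKEN, used for WeChat server verification",
    "OPENAI_API_KEY": "assigned to API_KEY for the OpenAI client",
    "OPENAI_BASE_URL": "assigned to BASE_URL for the OpenAI client",
    "EMAIL_KEY": "assigned to emailkey, the SMTP password used by send_email()",
}

missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
if missing:
    for name in missing:
        print(f"Missing {name}: {REQUIRED_VARS[name]}")
    raise SystemExit(1)
print("All environment variables referenced in app.py are set.")

Running this with an incomplete .env exits non-zero and lists what is missing, which is easier to diagnose than a failed smtp.login() or an OpenAI authentication error at request time.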