mistpe committed on
Commit 4eec083
1 Parent(s): 3bc6d83

Update app.py

Files changed (1)
  1. app.py +62 -112
app.py CHANGED
@@ -1,134 +1,84 @@
  import os
- from flask import Flask, request, abort
- import hashlib
- import time
- import xmltodict
  from openai import OpenAI
- import re
- import base64
- import requests

  app = Flask(__name__)

- # Environment variable configuration
  TOKEN = os.getenv("TOKEN")
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
- APPID = os.getenv("APPID")
- APPSECRET = os.getenv("APPSECRET")

- client = OpenAI(api_key=OPENAI_API_KEY)

- # User model store
- user_models = {}
-
- def check_signature():
-     signature = request.args.get('signature', '')
-     timestamp = request.args.get('timestamp', '')
-     nonce = request.args.get('nonce', '')
-     token = TOKEN
-     tmp_list = sorted([token, timestamp, nonce])
-     tmp_str = ''.join(tmp_list)
-     hash_obj = hashlib.sha1(tmp_str.encode('utf-8'))
-     return hash_obj.hexdigest() == signature
-
- def split_message(message, max_length=500):
-     """Split a message into chunks of max_length characters."""
-     return [message[i:i+max_length] for i in range(0, len(message), max_length)]
-
- def get_openai_response(message, model="gpt-4o", image_url=None):
      try:
-         messages = [
-             {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": message}
-         ]
-
-         if image_url:
-             messages[1]["content"] = [
-                 {"type": "text", "text": message},
-                 {"type": "image_url", "image_url": {"url": image_url}}
-             ]
-
-         completion = client.chat.completions.create(
              model=model,
-             messages=messages,
-             max_tokens=300
          )
-         return completion.choices[0].message.content
      except Exception as e:
-         return f"Error: {str(e)}"
-
- def handle_text_message(from_user, to_user, content):
-     if content.startswith('/'):
-         # Handle commands
-         parts = content.split()
-         command = parts[0][1:]
-         if command == 'help':
-             return "Available commands: /help, /setmodel [model_name]"
-         elif command == 'setmodel':
-             if len(parts) > 1:
-                 model = parts[1]
-                 user_models[from_user] = model
-                 return f"Model switched to {model}"
-             else:
-                 return "Please specify a model name. Usage: /setmodel [model_name]"
-         else:
-             return "Unknown command. Type /help for available commands."
-
-     # Normal conversation: call the OpenAI API
-     model = user_models.get(from_user, "gpt-4o")
-     response = get_openai_response(content, model)
-     return format_reply(from_user, to_user, response)
-
- def handle_image_message(from_user, to_user, pic_url):
-     model = user_models.get(from_user, "gpt-4o")
-     response = get_openai_response("What's in this image?", model, pic_url)
-     return format_reply(from_user, to_user, response)

- def format_reply(from_user, to_user, content):
-     response_parts = split_message(content)
-     replies = []
-     for part in response_parts:
-         reply = f"""
-         <xml>
-         <ToUserName><![CDATA[{from_user}]]></ToUserName>
-         <FromUserName><![CDATA[{to_user}]]></FromUserName>
-         <CreateTime>{int(time.time())}</CreateTime>
-         <MsgType><![CDATA[text]]></MsgType>
-         <Content><![CDATA[{part}]]></Content>
-         </xml>
-         """
-         replies.append(reply)
-     return ''.join(replies)

  @app.route('/', methods=['GET', 'POST'])
  def wechat():
      if request.method == 'GET':
-         echostr = request.args.get('echostr', '')
-         if check_signature():
-             return echostr
-         abort(403)
-     elif request.method == 'POST':
-         if not check_signature():
-             abort(403)
-
-         xml_data = request.data
-         msg = xmltodict.parse(xml_data)['xml']
-         msg_type = msg['MsgType']
-         from_user = msg['FromUserName']
-         to_user = msg['ToUserName']

-         if msg_type == 'text':
-             content = msg['Content']
-             return handle_text_message(from_user, to_user, content)
-         elif msg_type == 'image':
-             pic_url = msg['PicUrl']
-             return handle_image_message(from_user, to_user, pic_url)
-         elif msg_type == 'event':
-             event = msg['Event']
-             if event == 'subscribe':
-                 return format_reply(from_user, to_user, "Thanks for following! Type /help to see the available commands.")

-     return 'success'

  if __name__ == '__main__':
      app.run(host='0.0.0.0', port=7860)
 
  import os
+ from flask import Flask, request, make_response
+ import xml.etree.ElementTree as ET
+ from wechatpy import parse_message, create_reply
+ from wechatpy.utils import check_signature
+ from wechatpy.exceptions import InvalidSignatureException
  from openai import OpenAI

  app = Flask(__name__)

+ # Configuration
  TOKEN = os.getenv("TOKEN")
+ API_KEY = os.getenv("OPENAI_API_KEY")
+ BASE_URL = os.getenv("OPENAI_BASE_URL")
+ client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

+ # Per-user session store
+ user_sessions = {}

+ def get_openai_response(messages, model="gpt-3.5-turbo"):
      try:
+         response = client.chat.completions.create(
              model=model,
+             messages=messages
          )
+         return response.choices[0].message.content
      except Exception as e:
+         print(f"Error while calling the OpenAI API: {str(e)}")
+         return "Sorry, I ran into a problem and cannot answer your question."

+ def split_message(message, max_length=500):
+     return [message[i:i+max_length] for i in range(0, len(message), max_length)]

  @app.route('/', methods=['GET', 'POST'])
  def wechat():
      if request.method == 'GET':
+         token = TOKEN
+         query = request.args
+         signature = query.get('signature', '')
+         timestamp = query.get('timestamp', '')
+         nonce = query.get('nonce', '')
+         echostr = query.get('echostr', '')
+         try:
+             check_signature(token, signature, timestamp, nonce)
+         except InvalidSignatureException:
+             return 'Invalid signature'
+         return echostr

+     if request.method == 'POST':
+         xml_str = request.data
+         if not xml_str:
+             return ""

+         msg = parse_message(xml_str)
+         if msg.type == 'text':
+             user_id = msg.source
+             content = msg.content
+
+             if content.startswith('/model'):
+                 # Switch the model
+                 model = content.split(' ')[1]
+                 user_sessions[user_id] = {'model': model, 'messages': []}
+                 return create_reply(f'Model switched to {model}', msg).render()
+
+             if user_id not in user_sessions:
+                 user_sessions[user_id] = {'model': 'gpt-3.5-turbo', 'messages': []}
+
+             session = user_sessions[user_id]
+             session['messages'].append({"role": "user", "content": content})
+
+             response = get_openai_response(session['messages'], session['model'])
+             session['messages'].append({"role": "assistant", "content": response})
+
+             # Split long messages
+             response_parts = split_message(response)
+             for part in response_parts[:-1]:
+                 create_reply(part, msg).render()
+
+             return create_reply(response_parts[-1], msg).render()
+
+         return create_reply('Sorry, cannot handle this for now', msg).render()

  if __name__ == '__main__':
      app.run(host='0.0.0.0', port=7860)
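
Not part of the commit, but a minimal local smoke-test sketch for the updated webhook. It assumes the app is running at http://127.0.0.1:7860/ with the same TOKEN environment variable, that requests is installed, and that OPENAI_API_KEY (and optionally OPENAI_BASE_URL) is set on the server side, otherwise the bot returns its fallback apology. The signature is computed the same way the removed check_signature did, which is also what wechatpy.utils.check_signature verifies (SHA-1 over the sorted token, timestamp and nonce), and the POST body follows the standard WeChat inbound text-message XML. The names in the sketch (BASE, sign, gh_account, test_user) are illustrative, not part of the repository.

# smoke_test.py -- hypothetical helper, not part of app.py
import hashlib
import os
import time

import requests

BASE = "http://127.0.0.1:7860/"
TOKEN = os.getenv("TOKEN", "test-token")


def sign(token, timestamp, nonce):
    # Same scheme as the removed check_signature / wechatpy.utils.check_signature:
    # SHA-1 over the lexicographically sorted token, timestamp and nonce.
    joined = "".join(sorted([token, timestamp, nonce]))
    return hashlib.sha1(joined.encode("utf-8")).hexdigest()


timestamp = str(int(time.time()))
nonce = "123456"
params = {
    "signature": sign(TOKEN, timestamp, nonce),
    "timestamp": timestamp,
    "nonce": nonce,
    "echostr": "ping",
}

# GET: with a valid signature the server should echo back "ping".
print(requests.get(BASE, params=params).text)

# POST: a standard WeChat inbound text message; the server should answer with
# the <xml>...</xml> text reply rendered by create_reply(). Note that in this
# version only the GET branch verifies the signature.
xml = f"""<xml>
  <ToUserName><![CDATA[gh_account]]></ToUserName>
  <FromUserName><![CDATA[test_user]]></FromUserName>
  <CreateTime>{int(time.time())}</CreateTime>
  <MsgType><![CDATA[text]]></MsgType>
  <Content><![CDATA[Hello, who are you?]]></Content>
  <MsgId>1234567890123456</MsgId>
</xml>"""
print(requests.post(BASE, data=xml.encode("utf-8")).text)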