File size: 2,889 Bytes
e7c16da 4eec083 3bc6d83 8a9b05e e7c16da 4eec083 f5b7afa 4eec083 e7c16da 4eec083 3bc6d83 8422462 e7c16da 4eec083 e7c16da 4eec083 e7c16da 4eec083 e7c16da 4eec083 3bc6d83 4eec083 3bc6d83 e7c16da 4eec083 e7c16da 4eec083 3bc6d83 4eec083 8422462 4eec083 8a9b05e e7c16da |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 |
import logging
import os
import xml.etree.ElementTree as ET

from flask import Flask, request, make_response
from openai import OpenAI
from wechatpy import parse_message, create_reply
from wechatpy.exceptions import InvalidSignatureException
from wechatpy.utils import check_signature
app = Flask(__name__)
# Configuration — all values are supplied via environment variables.
TOKEN = os.getenv("TOKEN")  # WeChat server-verification token
API_KEY = os.getenv("OPENAI_API_KEY")
BASE_URL = os.getenv("OPENAI_BASE_URL")  # optional custom API base URL (e.g. a proxy)
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
# Per-user conversation state: user_id -> {'model': str, 'messages': list of chat dicts}
user_sessions = {}
def get_openai_response(messages, model="gpt-4o-mini"):
    """Send a chat history to the OpenAI API and return the reply text.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` chat messages.
        model: model identifier forwarded to the API.

    Returns:
        The assistant's reply text, or a fixed Chinese apology string on
        any error — the bot must always answer the user, so failures are
        deliberately swallowed here.
    """
    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return response.choices[0].message.content
    except Exception:
        # Log the full traceback (the original only print()ed str(e),
        # losing the stack and bypassing any log configuration).
        logging.getLogger(__name__).exception("OpenAI API call failed")
        return "抱歉,我遇到了一些问题,无法回答您的问题。"
def split_message(message, max_length=500):
    """Split *message* into consecutive chunks of at most *max_length* chars.

    Always returns at least one (possibly empty) chunk, so callers may
    safely index ``parts[0]`` / ``parts[-1]``.

    Args:
        message: text to split.
        max_length: maximum chunk length; must be > 0.

    Returns:
        List of string chunks in original order; ``[""]`` for an empty message.
    """
    if not message:
        # Guard: the plain slicing loop yields [] for "", which would break
        # callers that index the first/last part.
        return [""]
    return [message[i:i + max_length] for i in range(0, len(message), max_length)]
@app.route('/', methods=['GET', 'POST'])
def wechat():
    """WeChat official-account webhook endpoint.

    GET:  server-verification handshake — validate the request signature
          against TOKEN and echo back ``echostr``.
    POST: incoming message push — for text messages, maintain a per-user
          chat history, query OpenAI, and return a passive XML text reply.
          Non-text messages get a fixed fallback reply.
    """
    if request.method == 'GET':
        query = request.args
        signature = query.get('signature', '')
        timestamp = query.get('timestamp', '')
        nonce = query.get('nonce', '')
        echostr = query.get('echostr', '')
        try:
            check_signature(TOKEN, signature, timestamp, nonce)
        except InvalidSignatureException:
            return 'Invalid signature'
        return echostr
    if request.method == 'POST':
        xml_str = request.data
        if not xml_str:
            return ""
        msg = parse_message(xml_str)
        if msg.type == 'text':
            user_id = msg.source
            content = msg.content
            if content.startswith('/model'):
                # Model-switch command: "/model <name>". Guard against a
                # missing argument — the original indexed split()[1]
                # unconditionally and raised IndexError on a bare "/model".
                parts = content.split(' ', 1)
                if len(parts) < 2 or not parts[1].strip():
                    return create_reply('用法: /model <模型名>', msg).render()
                model = parts[1].strip()
                # Switching models resets the conversation.
                user_sessions[user_id] = {'model': model, 'messages': []}
                return create_reply(f'模型已切换为 {model}', msg).render()
            if user_id not in user_sessions:
                user_sessions[user_id] = {'model': 'gpt-4o-mini', 'messages': []}
            session = user_sessions[user_id]
            session['messages'].append({"role": "user", "content": content})
            # Cap the history so long conversations don't grow without bound
            # and eventually overflow the model's context window.
            session['messages'] = session['messages'][-20:]
            response = get_openai_response(session['messages'], session['model'])
            session['messages'].append({"role": "assistant", "content": response})
            # A WeChat passive reply can carry only ONE message per request,
            # so deliver the opening chunk of a long answer. (The original
            # rendered-and-discarded every leading chunk — dead code — and
            # then returned only the trailing fragment.)
            response_parts = split_message(response) or ['']
            return create_reply(response_parts[0], msg).render()
        return create_reply('Sorry, can not handle this for now', msg).render()
if __name__ == '__main__':
    # Development entry point: listen on all interfaces, port 7860.
    # NOTE(review): 7860 suggests a Hugging Face Spaces deployment — confirm.
    app.run(host='0.0.0.0', port=7860)