File size: 3,913 Bytes
c3c93e2
 
4eec083
c3c93e2
 
 
 
3bc6d83
a4ae7b7
 
 
 
8a9b05e
 
e7c16da
4eec083
c3c93e2
4eec083
 
 
e7c16da
a4ae7b7
 
 
 
 
 
 
4eec083
 
3bc6d83
c3c93e2
 
 
 
 
 
 
 
 
 
 
 
a4ae7b7
c3c93e2
 
3bc6d83
4eec083
 
3bc6d83
a4ae7b7
 
 
c3c93e2
 
e7c16da
c3c93e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a4ae7b7
c3c93e2
 
 
 
 
 
 
 
 
4eec083
c3c93e2
 
4eec083
c3c93e2
4eec083
c3c93e2
 
 
 
 
4eec083
c3c93e2
8a9b05e
 
c3c93e2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, request, make_response
import hashlib
import time
import xml.etree.ElementTree as ET
import os
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file (WECHAT_TOKEN, OPENAI_*).
load_dotenv()

app = Flask(__name__)

# Configuration read from the environment; each is None if the variable is unset.
TOKEN = os.getenv('WECHAT_TOKEN')
API_KEY = os.getenv("OPENAI_API_KEY")
BASE_URL = os.getenv("OPENAI_BASE_URL")
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

# Models the user may switch between via the /model command:
# API model id -> human-readable label shown in replies.
AVAILABLE_MODELS = {
    'gpt-3.5-turbo': 'GPT-3.5 Turbo',
    'gpt-4': 'GPT-4',
    'gpt-4-turbo': 'GPT-4 Turbo',
}

# Per-user conversation state, keyed by the WeChat FromUserName.
# Value schema (set in wechatai):
#   {'model': str, 'messages': [chat turns], 'pending_response': [unsent chunks]}
# NOTE(review): in-memory only — state is lost on restart and not shared
# across workers; confirm single-process deployment.
user_sessions = {}

def verify_wechat(request):
    """Answer the WeChat server-ownership verification handshake (GET).

    WeChat sends signature/timestamp/nonce/echostr as query parameters.
    The server proves ownership by sha1-hashing the sorted concatenation
    of (TOKEN, timestamp, nonce) and, if it matches ``signature``,
    echoing ``echostr`` back verbatim.

    Returns the echostr on success, or an error string otherwise.
    (Source had a placeholder body here; this is the standard handshake.)
    """
    args = request.args
    signature = args.get('signature', '')
    timestamp = args.get('timestamp', '')
    nonce = args.get('nonce', '')
    echostr = args.get('echostr', '')

    # WeChat mandates lexicographic sort of the three values before hashing.
    candidates = sorted([TOKEN or '', timestamp, nonce])
    digest = hashlib.sha1(''.join(candidates).encode('utf-8')).hexdigest()

    if digest == signature:
        return echostr
    return 'Invalid signature'

def getUserMessageContentFromXML(xml_content):
    """Parse an incoming WeChat message push (XML bytes/str).

    Returns a 3-tuple ``(content, from_user_name, to_user_name)`` — the
    order the caller in ``wechatai`` unpacks.  (Source had a placeholder
    body here; this is the minimal parse for text messages.)

    Raises xml.etree.ElementTree.ParseError on malformed XML and
    AttributeError if a required element is missing.
    """
    root = ET.fromstring(xml_content)
    content = root.find('Content').text
    from_user_name = root.find('FromUserName').text
    to_user_name = root.find('ToUserName').text
    return content, from_user_name, to_user_name

def generate_response_xml(from_user_name, to_user_name, output_content):
    """Build the passive-reply XML the WeChat server expects for text.

    ``from_user_name`` (the human) becomes ToUserName of the reply and
    ``to_user_name`` (the official account) becomes FromUserName — i.e.
    the addresses are swapped relative to the incoming message.
    Returns a Flask response with an application/xml content type.
    (Source had a placeholder body here; this is the standard text reply.)
    """
    reply_xml = (
        "<xml>"
        "<ToUserName><![CDATA[{to}]]></ToUserName>"
        "<FromUserName><![CDATA[{frm}]]></FromUserName>"
        "<CreateTime>{ts}</CreateTime>"
        "<MsgType><![CDATA[text]]></MsgType>"
        "<Content><![CDATA[{content}]]></Content>"
        "</xml>"
    ).format(
        to=from_user_name,
        frm=to_user_name,
        ts=int(time.time()),
        content=output_content,
    )
    response = make_response(reply_xml)
    response.content_type = 'application/xml'
    return response

def get_openai_response(messages, model="gpt-3.5-turbo"):
    """Send the accumulated chat ``messages`` to the configured endpoint.

    ``messages`` is the OpenAI chat format ([{'role': ..., 'content': ...}]).
    Returns the assistant's reply text; on any API failure returns an
    error string instead of raising, so the webhook still answers WeChat.
    (Source had a placeholder body here; this is the standard SDK call.)
    """
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Best-effort reply on failure — the caller feeds this straight
        # back to the user, so never let the exception escape.
        return f"API请求出错: {e}"

def split_message(message, max_length=500):
    """Break *message* into consecutive chunks of at most *max_length* chars.

    An empty message yields an empty list.
    """
    chunks = []
    start = 0
    while start < len(message):
        chunks.append(message[start:start + max_length])
        start += max_length
    return chunks

def list_available_models():
    """Return the configured models as 'id: label' lines joined by newlines."""
    lines = []
    for model_id, display_name in AVAILABLE_MODELS.items():
        lines.append(f"{model_id}: {display_name}")
    return "\n".join(lines)

@app.route('/api/wx', methods=['GET', 'POST'])
def wechatai():
    """Single WeChat webhook endpoint.

    GET  -> server-ownership verification handshake.
    POST -> incoming user message. Supported commands:
            /models          list selectable models
            /model <name>    switch the user's model (resets the session)
            继续              fetch the next pending chunk of a long reply
            anything else    a normal chat turn sent to the model

    Fixes vs. the original:
    * the "继续" hint strings used unescaped double quotes inside a
      double-quoted literal (a SyntaxError); now single-quoted;
    * a bare "/model" with no argument raised IndexError on split()[1];
      now answered with a usage hint.
    """
    if request.method == 'GET':
        return verify_wechat(request)

    # POST: parse the message XML pushed by the WeChat server.
    print("user request data: ", request.data)
    user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(request.data)
    print("user message content: ", user_message_content)

    lowered = user_message_content.lower()
    if lowered == '/models':
        response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
    elif lowered.startswith('/model'):
        parts = user_message_content.split(' ')
        if len(parts) < 2 or not parts[1]:
            # Bare "/model" — original code crashed here with IndexError.
            response_content = f'请指定模型名称。可用的模型有:\n{list_available_models()}'
        else:
            model = parts[1]
            if model in AVAILABLE_MODELS:
                # Switching models starts a fresh session for this user.
                user_sessions[from_user_name] = {'model': model, 'messages': [], 'pending_response': []}
                response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
            else:
                response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
    elif user_message_content.lower() == '继续':
        # Deliver the next queued chunk of a previously-split long reply.
        session = user_sessions.get(from_user_name)
        if session and session['pending_response']:
            response_content = session['pending_response'].pop(0)
            if session['pending_response']:
                response_content += '\n\n回复"继续"获取下一部分。'
            else:
                response_content += "\n\n回复结束。"
        else:
            response_content = "没有待发送的消息。"
    else:
        # Normal chat turn: lazily create a session with the default model.
        if from_user_name not in user_sessions:
            user_sessions[from_user_name] = {'model': 'gpt-3.5-turbo', 'messages': [], 'pending_response': []}

        session = user_sessions[from_user_name]
        session['messages'].append({"role": "user", "content": user_message_content})

        gpt_response = get_openai_response(session['messages'], session['model'])
        session['messages'].append({"role": "assistant", "content": gpt_response})

        # WeChat replies are length-limited; queue any overflow chunks.
        response_parts = split_message(gpt_response)
        if len(response_parts) > 1:
            response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'
            session['pending_response'] = response_parts[1:]
        else:
            response_content = response_parts[0]

    return generate_response_xml(from_user_name, to_user_name, response_content)

if __name__ == '__main__':
    # Development entry point only: Flask's built-in server with debug=True
    # is not safe for production — deploy behind a WSGI server instead.
    app.run(host='0.0.0.0', port=7860, debug=True)