|
|
|
|
|
from flask import Flask, request, make_response |
|
import hashlib |
|
import time |
|
import xml.etree.ElementTree as ET |
|
import os |
|
from openai import OpenAI |
|
from dotenv import load_dotenv |
|
|
|
|
|
# Load configuration from a local .env file into the process environment.
load_dotenv()

app = Flask(__name__)

# WeChat signature token and OpenAI credentials, all supplied via environment
# variables (may be None if unset — no validation is done here).
TOKEN = os.getenv('WECHAT_TOKEN')
API_KEY = os.getenv("OPENAI_API_KEY")
BASE_URL = os.getenv("OPENAI_BASE_URL")
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

# Model id -> human-readable label, shown by the /models command and used to
# validate the /model switch command.
AVAILABLE_MODELS = {
    'gpt-3.5-turbo': 'GPT-3.5 Turbo',
    'gpt-4': 'GPT-4',
    'gpt-4-turbo': 'GPT-4 Turbo',
}

# Per-user conversation state, keyed by the WeChat FromUserName.
# Each entry: {'model': str, 'messages': list of chat messages,
#              'pending_response': list of str chunks awaiting "继续"}.
# NOTE(review): in-memory only — state is lost on restart and is not shared
# across multiple worker processes.
user_sessions = {}
|
|
|
def verify_wechat(request):
    """Answer WeChat's GET verification handshake for this webhook.

    NOTE(review): implementation elided in this view ('...'). Presumably it
    checks the signature/timestamp/nonce query params against TOKEN with
    sha1 (hashlib is imported) and echoes back echostr — confirm against the
    full source.
    """

    ...
|
|
|
def getUserMessageContentFromXML(xml_content):
    """Parse an incoming WeChat message XML payload.

    Returns a 3-tuple ``(content, from_user_name, to_user_name)`` — this
    shape is established by the caller in ``wechatai``.

    NOTE(review): implementation elided in this view ('...'); presumably
    uses xml.etree.ElementTree (imported above) on ``xml_content`` — confirm.
    """

    ...
|
|
|
def generate_response_xml(from_user_name, to_user_name, output_content):
    """Build the XML reply returned to WeChat for a POST message.

    The caller in ``wechatai`` returns this value directly as the Flask
    response, so it presumably produces a complete response object or XML
    string (make_response is imported) — confirm against the full source.

    NOTE(review): implementation elided in this view ('...').
    """

    ...
|
|
|
def get_openai_response(messages, model="gpt-3.5-turbo"): |
|
|
|
... |
|
|
|
def split_message(message, max_length=500):
    """Split *message* into consecutive chunks of at most *max_length* chars.

    Always returns at least one element: the original implementation returned
    ``[]`` for an empty message, which made the caller's ``response_parts[0]``
    raise IndexError.

    Args:
        message: the text to split.
        max_length: maximum chunk size in characters (default 500, sized for
            WeChat text-reply limits).

    Returns:
        list[str]: the chunks in order; ``"".join(result) == message``.
    """
    if not message:
        # Guarantee a non-empty list so callers can index part [0] safely.
        return [message]
    return [message[i:i + max_length] for i in range(0, len(message), max_length)]
|
|
|
def list_available_models():
    """Render AVAILABLE_MODELS as one 'model_id: label' entry per line."""
    entries = []
    for model_id, label in AVAILABLE_MODELS.items():
        entries.append(f"{model_id}: {label}")
    return "\n".join(entries)
|
|
|
@app.route('/api/wx', methods=['GET', 'POST'])
def wechatai():
    """WeChat official-account webhook endpoint.

    GET: signature verification handshake (delegated to ``verify_wechat``).
    POST: an incoming user message. Handles three command forms —
    ``/models`` (list models), ``/model <name>`` (switch model, resetting the
    session) and ``继续`` (fetch the next pending chunk of a long reply) —
    otherwise forwards the text to OpenAI with the per-user history and
    replies with WeChat XML. Long replies are split into chunks and queued in
    the session's ``pending_response``.
    """
    if request.method == 'GET':
        return verify_wechat(request)

    # POST: parse sender, recipient and text out of the incoming XML.
    print("user request data: ", request.data)
    user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(request.data)
    print("user message content: ", user_message_content)

    if user_message_content.lower() == '/models':
        response_content = f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型"
    elif user_message_content.lower().startswith('/model'):
        parts = user_message_content.split(' ')
        # Guard against a bare "/model" with no argument — indexing [1]
        # unconditionally raised IndexError; an empty name falls through to
        # the invalid-model branch below.
        model = parts[1] if len(parts) > 1 else ''
        if model in AVAILABLE_MODELS:
            # Switching models deliberately starts a fresh session:
            # history and any pending chunks are discarded.
            user_sessions[from_user_name] = {'model': model, 'messages': [], 'pending_response': []}
            response_content = f'模型已切换为 {AVAILABLE_MODELS[model]}'
        else:
            response_content = f'无效的模型名称。可用的模型有:\n{list_available_models()}'
    elif user_message_content.lower() == '继续':
        # Deliver the next queued chunk of a previously split long reply.
        if from_user_name in user_sessions and user_sessions[from_user_name]['pending_response']:
            response_content = user_sessions[from_user_name]['pending_response'].pop(0)
            if user_sessions[from_user_name]['pending_response']:
                response_content += "\n\n回复“继续”获取下一部分。"
            else:
                response_content += "\n\n回复结束。"
        else:
            response_content = "没有待发送的消息。"
    else:
        # Regular chat message: lazily create the session, append the user
        # turn, query OpenAI, and record the assistant turn.
        if from_user_name not in user_sessions:
            user_sessions[from_user_name] = {'model': 'gpt-3.5-turbo', 'messages': [], 'pending_response': []}

        session = user_sessions[from_user_name]
        session['messages'].append({"role": "user", "content": user_message_content})

        gpt_response = get_openai_response(session['messages'], session['model'])
        session['messages'].append({"role": "assistant", "content": gpt_response})

        # WeChat text replies are size-limited: send the first chunk now and
        # queue the rest for the user to pull with "继续".
        response_parts = split_message(gpt_response)

        if len(response_parts) > 1:
            response_content = response_parts[0] + "\n\n回复“继续”获取下一部分。"
            session['pending_response'] = response_parts[1:]
        else:
            response_content = response_parts[0]

    return generate_response_xml(from_user_name, to_user_name, response_content)
|
|
|
if __name__ == '__main__':
    # Development entry point: listen on all interfaces on port 7860.
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # unsafe in production; deploy behind a real WSGI server instead.
    app.run(host='0.0.0.0', port=7860, debug=True)