|
import os |
|
from flask import Flask, request, make_response |
|
from wechatpy import parse_message, create_reply |
|
from wechatpy.utils import check_signature |
|
from wechatpy.exceptions import InvalidSignatureException |
|
from openai import OpenAI |
|
from dotenv import load_dotenv |
|
|
|
|
|
# Pull configuration from a local .env file (TOKEN, OPENAI_API_KEY, OPENAI_BASE_URL).
load_dotenv()


app = Flask(__name__)


# WeChat server-validation token — must match the token configured in the
# official-account admin console.
TOKEN = os.getenv('TOKEN')

API_KEY = os.getenv("OPENAI_API_KEY")

# Optional custom API endpoint (e.g. a proxy); None falls back to the SDK default.
BASE_URL = os.getenv("OPENAI_BASE_URL")

client = OpenAI(api_key=API_KEY, base_url=BASE_URL)


# Models users may switch to with the /model command: model id -> display name.
AVAILABLE_MODELS = {

    'gpt-3.5-turbo': 'GPT-3.5 Turbo',

    'gpt-4': 'GPT-4',

    'gpt-4-turbo': 'GPT-4 Turbo',

}


# Per-user conversation state keyed by WeChat OpenID (msg.source):
# {'model': <model id>, 'messages': [<OpenAI chat messages>]}.
# NOTE(review): in-memory only — state is lost on restart and is not shared
# across multiple worker processes.
user_sessions = {}
|
|
|
def get_openai_response(messages, model="gpt-3.5-turbo"):
    """Send a chat history to the OpenAI API and return the assistant's reply.

    Args:
        messages: list of OpenAI chat messages ({'role': ..., 'content': ...}).
        model: id of the model to query (defaults to gpt-3.5-turbo).

    Returns:
        The reply text, or a canned apology string if the API call fails
        (best-effort boundary: any exception is logged and swallowed so the
        webhook never crashes on an upstream error).
    """
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
        )
    except Exception as e:
        print(f"调用OpenAI API时出错: {str(e)}")
        return "抱歉,我遇到了一些问题,无法回答您的问题。"
    return completion.choices[0].message.content
|
|
|
def split_message(message, max_length=500):
    """Break *message* into consecutive chunks of at most *max_length* characters.

    Returns an empty list for an empty message; the final chunk may be shorter
    than *max_length*.
    """
    chunks = []
    start = 0
    while start < len(message):
        chunks.append(message[start:start + max_length])
        start += max_length
    return chunks
|
|
|
def list_available_models():
    """Return a newline-separated "id: display name" listing of AVAILABLE_MODELS."""
    lines = [f"{model_id}: {label}" for model_id, label in AVAILABLE_MODELS.items()]
    return "\n".join(lines)
|
|
|
@app.route('/', methods=['GET', 'POST'])
def wechat():
    """WeChat official-account webhook endpoint.

    GET  — server-validation handshake: verify the WeChat signature and echo
           back ``echostr`` (or the literal string 'Invalid signature').
    POST — inbound message. For text messages, supports two commands:
           ``/models`` lists the selectable models and ``/model <name>``
           switches the sender's session to that model (and clears history).
           Any other text is appended to the sender's conversation and
           forwarded to the OpenAI API; the reply is sent back as a passive
           XML response. Non-text messages get a fixed fallback reply.
    """
    if request.method == 'GET':
        # Handshake performed by the WeChat server when the URL is configured.
        token = TOKEN
        query = request.args
        signature = query.get('signature', '')
        timestamp = query.get('timestamp', '')
        nonce = query.get('nonce', '')
        echostr = query.get('echostr', '')
        try:
            check_signature(token, signature, timestamp, nonce)
        except InvalidSignatureException:
            return 'Invalid signature'
        return echostr

    if request.method == 'POST':
        xml_str = request.data
        if not xml_str:
            return ""

        msg = parse_message(xml_str)
        if msg.type == 'text':
            user_id = msg.source  # sender's OpenID keys the session store
            content = msg.content

            if content.lower() == '/models':
                return create_reply(f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型", msg).render()

            if content.lower().startswith('/model'):
                # Bug fix: bare "/model" (no argument) used to raise
                # IndexError and crash the request; treat it like an
                # invalid model name instead.
                parts = content.split(' ')
                model = parts[1] if len(parts) > 1 else ''
                if model in AVAILABLE_MODELS:
                    # Switching models starts a fresh conversation.
                    user_sessions[user_id] = {'model': model, 'messages': []}
                    return create_reply(f'模型已切换为 {AVAILABLE_MODELS[model]}', msg).render()
                else:
                    return create_reply(f'无效的模型名称。可用的模型有:\n{list_available_models()}', msg).render()

            if user_id not in user_sessions:
                user_sessions[user_id] = {'model': 'gpt-3.5-turbo', 'messages': []}

            session = user_sessions[user_id]
            session['messages'].append({"role": "user", "content": content})
            # Cap retained history so long-running conversations cannot grow
            # without bound and overflow the model's context window.
            session['messages'] = session['messages'][-20:]

            response = get_openai_response(session['messages'], session['model'])
            session['messages'].append({"role": "assistant", "content": response})

            # WeChat passive replies allow exactly one response per inbound
            # message, so only the final chunk can be delivered. (The old
            # loop that rendered the leading chunks discarded every result —
            # render() is side-effect-free — and has been removed as dead
            # code; leading chunks of an over-long reply are still dropped.)
            response_parts = split_message(response)
            return create_reply(response_parts[-1], msg).render()

        return create_reply('Sorry, can not handle this for now', msg).render()
|
|
|
if __name__ == '__main__':
    # Flask development server, reachable on all interfaces at port 7860.
    # NOTE(review): use a production WSGI server (gunicorn/uwsgi) for deployment.
    app.run(host='0.0.0.0', port=7860)