mistpe committed on
Commit
a4ae7b7
1 Parent(s): ca57c49

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -9
app.py CHANGED
@@ -1,23 +1,33 @@
1
  import os
2
  from flask import Flask, request, make_response
3
- import xml.etree.ElementTree as ET
4
  from wechatpy import parse_message, create_reply
5
  from wechatpy.utils import check_signature
6
  from wechatpy.exceptions import InvalidSignatureException
7
  from openai import OpenAI
 
 
 
 
8
 
9
  app = Flask(__name__)
10
 
11
  # 配置
12
- TOKEN = os.getenv("TOKEN")
13
  API_KEY = os.getenv("OPENAI_API_KEY")
14
  BASE_URL = os.getenv("OPENAI_BASE_URL")
15
  client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
16
 
 
 
 
 
 
 
 
17
  # 存储用户会话信息
18
  user_sessions = {}
19
 
20
- def get_openai_response(messages, model="gpt-4o-mini"):
21
  try:
22
  response = client.chat.completions.create(
23
  model=model,
@@ -31,6 +41,9 @@ def get_openai_response(messages, model="gpt-4o-mini"):
31
  def split_message(message, max_length=500):
32
  return [message[i:i+max_length] for i in range(0, len(message), max_length)]
33
 
 
 
 
34
  @app.route('/', methods=['GET', 'POST'])
35
  def wechat():
36
  if request.method == 'GET':
@@ -56,14 +69,19 @@ def wechat():
56
  user_id = msg.source
57
  content = msg.content
58
 
59
- if content.startswith('/model'):
60
- # 切换模型
 
 
61
  model = content.split(' ')[1]
62
- user_sessions[user_id] = {'model': model, 'messages': []}
63
- return create_reply(f'模型已切换为 {model}', msg).render()
 
 
 
64
 
65
  if user_id not in user_sessions:
66
- user_sessions[user_id] = {'model': 'gpt-4o-mini', 'messages': []}
67
 
68
  session = user_sessions[user_id]
69
  session['messages'].append({"role": "user", "content": content})
@@ -71,7 +89,6 @@ def wechat():
71
  response = get_openai_response(session['messages'], session['model'])
72
  session['messages'].append({"role": "assistant", "content": response})
73
 
74
- # 分割长消息
75
  response_parts = split_message(response)
76
  for part in response_parts[:-1]:
77
  create_reply(part, msg).render()
 
1
  import os
2
  from flask import Flask, request, make_response
 
3
  from wechatpy import parse_message, create_reply
4
  from wechatpy.utils import check_signature
5
  from wechatpy.exceptions import InvalidSignatureException
6
  from openai import OpenAI
7
+ from dotenv import load_dotenv
8
+
9
+ # 加载环境变量
10
+ load_dotenv()
11
 
12
  app = Flask(__name__)
13
 
14
  # 配置
15
+ TOKEN = os.getenv('TOKEN')
16
  API_KEY = os.getenv("OPENAI_API_KEY")
17
  BASE_URL = os.getenv("OPENAI_BASE_URL")
18
  client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
19
 
20
+ # 定义可用的模型列表
21
+ AVAILABLE_MODELS = {
22
+ 'gpt-3.5-turbo': 'GPT-3.5 Turbo',
23
+ 'gpt-4': 'GPT-4',
24
+ 'gpt-4-turbo': 'GPT-4 Turbo',
25
+ }
26
+
27
  # 存储用户会话信息
28
  user_sessions = {}
29
 
30
+ def get_openai_response(messages, model="gpt-3.5-turbo"):
31
  try:
32
  response = client.chat.completions.create(
33
  model=model,
 
41
def split_message(message, max_length=500):
    """Split *message* into consecutive chunks of at most *max_length* chars.

    WeChat limits the length of a single reply, so long model responses
    are delivered in several parts. An empty message yields an empty list.
    """
    parts = []
    start = 0
    while start < len(message):
        parts.append(message[start:start + max_length])
        start += max_length
    return parts
43
 
44
def list_available_models():
    """Return a newline-separated, human-readable listing of AVAILABLE_MODELS.

    Each line has the form ``model_id: display_name``; used to build the
    reply for the /models command and for invalid /model requests.
    """
    lines = []
    for model_id, display_name in AVAILABLE_MODELS.items():
        lines.append(f"{model_id}: {display_name}")
    return "\n".join(lines)
47
  @app.route('/', methods=['GET', 'POST'])
48
  def wechat():
49
  if request.method == 'GET':
 
69
  user_id = msg.source
70
  content = msg.content
71
 
72
+ if content.lower() == '/models':
73
+ return create_reply(f"可用的模型列表:\n{list_available_models()}\n\n使用 /model 模型名称 来切换模型", msg).render()
74
+
75
+ if content.lower().startswith('/model'):
76
  model = content.split(' ')[1]
77
+ if model in AVAILABLE_MODELS:
78
+ user_sessions[user_id] = {'model': model, 'messages': []}
79
+ return create_reply(f'模型已切换为 {AVAILABLE_MODELS[model]}', msg).render()
80
+ else:
81
+ return create_reply(f'无效的模型名称。可用的模型有:\n{list_available_models()}', msg).render()
82
 
83
  if user_id not in user_sessions:
84
+ user_sessions[user_id] = {'model': 'gpt-3.5-turbo', 'messages': []}
85
 
86
  session = user_sessions[user_id]
87
  session['messages'].append({"role": "user", "content": content})
 
89
  response = get_openai_response(session['messages'], session['model'])
90
  session['messages'].append({"role": "assistant", "content": response})
91
 
 
92
  response_parts = split_message(response)
93
  for part in response_parts[:-1]:
94
  create_reply(part, msg).render()