tianlong12 commited on
Commit
fed1648
1 Parent(s): bf3f697

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -54
app.py CHANGED
@@ -9,26 +9,6 @@ app = Flask(__name__)
9
def generate_random_ip():
    """Return a random dotted-quad IPv4 address as a string.

    The first octet is drawn from 1-255 (never 0); the remaining three
    octets are drawn from 0-255.  Used to populate a spoofed
    X-Forwarded-For header.
    """
    octets = [random.randint(1, 255)]
    octets += [random.randint(0, 255) for _ in range(3)]
    return ".".join(str(o) for o in octets)
11
 
12
def generate_user_agent():
    """Return a randomly generated, plausible browser User-Agent string.

    A random OS token and a random browser family (Chrome, Firefox,
    Safari, Edge) are chosen, and a random version number is filled in
    per family.  NOTE: the Safari template always reports an Intel Mac
    platform regardless of the OS token drawn, matching real Safari UAs.
    """
    platforms = ['Windows NT 10.0', 'Windows NT 6.1', 'Mac OS X 10_15_7', 'Ubuntu', 'Linux x86_64']
    browsers = ['Chrome', 'Firefox', 'Safari', 'Edge']

    # Randomized version numbers, one per browser family.
    chrome_ver = f"{random.randint(70, 126)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"
    firefox_ver = f"{random.randint(70, 100)}.0"
    safari_ver = f"{random.randint(600, 615)}.{random.randint(1, 9)}.{random.randint(1, 9)}"
    edge_ver = f"{random.randint(80, 100)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"

    platform = random.choice(platforms)
    browser = random.choice(browsers)

    # Dispatch table instead of an if/elif chain; templates are
    # byte-identical to the original UA formats.
    templates = {
        'Chrome': f"Mozilla/5.0 ({platform}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{chrome_ver} Safari/537.36",
        'Firefox': f"Mozilla/5.0 ({platform}; rv:{firefox_ver}) Gecko/20100101 Firefox/{firefox_ver}",
        'Safari': f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/{safari_ver} (KHTML, like Gecko) Version/{safari_ver.split('.')[0]}.1.2 Safari/{safari_ver}",
        'Edge': f"Mozilla/5.0 ({platform}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{edge_ver} Safari/537.36 Edg/{edge_ver}",
    }
    return templates[browser]
31
-
32
  def format_openai_response(content, finish_reason=None):
33
  return {
34
  "id": "chatcmpl-123",
@@ -43,7 +23,6 @@ def format_openai_response(content, finish_reason=None):
43
  }
44
 
45
  @app.route('/hf/v1/chat/completions', methods=['POST'])
46
-
47
  def chat_completions():
48
  data = request.json
49
  messages = data.get('messages', [])
@@ -52,15 +31,8 @@ def chat_completions():
52
  if not messages:
53
  return {"error": "No messages provided"}, 400
54
 
55
- # 将消息列表转换为单个字符串,保留对话历史
56
- conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
57
-
58
- # 添加指导语
59
- conversation += "\n请关注并回复user最近的消息并避免总结对话历史的回答"
60
-
61
  model = data.get('model', 'gpt-4o')
62
-
63
- # 根据模型名称设置endpoint和URL
64
  if model.startswith('gpt'):
65
  endpoint = "openAI"
66
  original_api_url = 'https://chatpro.ai-pro.org/api/ask/openAI'
@@ -69,43 +41,57 @@ def chat_completions():
69
  original_api_url = 'https://chatpro.ai-pro.org/api/ask/claude'
70
  else:
71
  return {"error": "Unsupported model"}, 400
72
-
73
  headers = {
74
  'content-type': 'application/json',
75
  'X-Forwarded-For': generate_random_ip(),
76
  'origin': 'https://chatpro.ai-pro.org',
77
  'user-agent': generate_user_agent()
78
  }
79
-
80
- payload = {
81
- "text": conversation,
82
- "endpoint": endpoint,
83
- "model": model
84
- }
85
-
86
  def generate():
87
- last_content = ""
88
- response = requests.post(original_api_url, headers=headers, json=payload, stream=True)
89
- client = sseclient.SSEClient(response)
90
-
91
- for event in client.events():
92
- if event.data.startswith('{"text":'):
93
- data = json.loads(event.data)
94
- new_content = data['text'][len(last_content):]
95
- last_content = data['text']
96
-
97
- if new_content:
98
- yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
99
 
100
- elif '"final":true' in event.data:
101
- yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
102
- yield "data: [DONE]\n\n"
103
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
  if stream:
106
  return Response(stream_with_context(generate()), content_type='text/event-stream')
107
  else:
108
  full_response = ""
 
109
  for chunk in generate():
110
  if chunk.startswith("data: ") and not chunk.strip() == "data: [DONE]":
111
  response_data = json.loads(chunk[6:])
@@ -113,6 +99,8 @@ def chat_completions():
113
  delta = response_data['choices'][0].get('delta', {})
114
  if 'content' in delta:
115
  full_response += delta['content']
 
 
116
 
117
  return {
118
  "id": "chatcmpl-123",
@@ -125,7 +113,7 @@ def chat_completions():
125
  "role": "assistant",
126
  "content": full_response
127
  },
128
- "finish_reason": "stop"
129
  }],
130
  "usage": {
131
  "prompt_tokens": 0,
 
9
  def generate_random_ip():
10
  return f"{random.randint(1,255)}.{random.randint(0,255)}.{random.randint(0,255)}.{random.randint(0,255)}"
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  def format_openai_response(content, finish_reason=None):
13
  return {
14
  "id": "chatcmpl-123",
 
23
  }
24
 
25
  @app.route('/hf/v1/chat/completions', methods=['POST'])
 
26
  def chat_completions():
27
  data = request.json
28
  messages = data.get('messages', [])
 
31
  if not messages:
32
  return {"error": "No messages provided"}, 400
33
 
 
 
 
 
 
 
34
  model = data.get('model', 'gpt-4o')
35
+
 
36
  if model.startswith('gpt'):
37
  endpoint = "openAI"
38
  original_api_url = 'https://chatpro.ai-pro.org/api/ask/openAI'
 
41
  original_api_url = 'https://chatpro.ai-pro.org/api/ask/claude'
42
  else:
43
  return {"error": "Unsupported model"}, 400
44
+
45
  headers = {
46
  'content-type': 'application/json',
47
  'X-Forwarded-For': generate_random_ip(),
48
  'origin': 'https://chatpro.ai-pro.org',
49
  'user-agent': generate_user_agent()
50
  }
51
+
 
 
 
 
 
 
52
  def generate():
53
+ nonlocal messages
54
+ full_response = ""
55
+ while True:
56
+ conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
57
+ conversation += "\n请关注并回复user最近的消息并避免总结对话历史的回答"
 
 
 
 
 
 
 
58
 
59
+ payload = {
60
+ "text": conversation,
61
+ "endpoint": endpoint,
62
+ "model": model
63
+ }
64
+
65
+ response = requests.post(original_api_url, headers=headers, json=payload, stream=True)
66
+ client = sseclient.SSEClient(response)
67
+
68
+ for event in client.events():
69
+ if event.data.startswith('{"text":'):
70
+ data = json.loads(event.data)
71
+ new_content = data['text'][len(full_response):]
72
+ full_response = data['text']
73
+
74
+ if new_content:
75
+ yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
76
+
77
+ elif '"final":true' in event.data:
78
+ final_data = json.loads(event.data)
79
+ finish_reason = final_data.get('responseMessage', {}).get('finish_reason', 'stop')
80
+ if finish_reason == 'length':
81
+ # 如果因为长度被截断,添加已生成的回复到消息列表,并继续生成
82
+ messages.append({"role": "assistant", "content": full_response})
83
+ messages.append({"role": "user", "content": "请继续你的输出"})
84
+ break
85
+ else:
86
+ yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
87
+ yield "data: [DONE]\n\n"
88
+ return
89
 
90
  if stream:
91
  return Response(stream_with_context(generate()), content_type='text/event-stream')
92
  else:
93
  full_response = ""
94
+ finish_reason = "stop"
95
  for chunk in generate():
96
  if chunk.startswith("data: ") and not chunk.strip() == "data: [DONE]":
97
  response_data = json.loads(chunk[6:])
 
99
  delta = response_data['choices'][0].get('delta', {})
100
  if 'content' in delta:
101
  full_response += delta['content']
102
+ if 'finish_reason' in delta:
103
+ finish_reason = delta['finish_reason']
104
 
105
  return {
106
  "id": "chatcmpl-123",
 
113
  "role": "assistant",
114
  "content": full_response
115
  },
116
+ "finish_reason": finish_reason
117
  }],
118
  "usage": {
119
  "prompt_tokens": 0,