Niansuh committed
Commit 11a3d84
1 Parent(s): 3f8b380

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -74,7 +74,7 @@ def chat_completions():
    full_response = ""
    while True:
        conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
-        conversation += "\n请关注并回复user最近的消息并避免总结对话历史的回答"
+        conversation += "\nPlease follow and reply to the user’s recent messages and avoid answers that summarize the conversation history."

        payload = {
            "text": conversation,
@@ -101,19 +101,19 @@ def chat_completions():

        if finish_reason == 'length':
            messages.append({"role": "assistant", "content": full_response})
-            messages.append({"role": "user", "content": "请继续你的输出,不要重复之前的内容"})
-            break # 跳出当前循环,继续下一次请求
+            messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
+            break # Jump out of the current loop and continue with the next request
        else:
-            # 正常结束,发送最后的内容(如果有的话)
+            # End normally, sending the final content (if any)
            last_content = response_message.get('text', '')
            if last_content and last_content != full_response:
                yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"

            yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
            yield "data: [DONE]\n\n"
-            return # 完全结束生成
+            return # completely end generation

-    # 如果因为多次长度限制而最终结束,发送一个停止信号
+    # If it ends due to multiple length limits, send a stop signal
    yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
    yield "data: [DONE]\n\n"
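
For context, these hunks sit in the streaming branch of chat_completions(): whenever the upstream reply is cut off with finish_reason == 'length', the handler stores the partial answer, appends a "please continue" user message, and issues another request; on any other finish reason it emits the remaining text and closes the SSE stream. The sketch below shows that retry-on-length pattern in isolation. It is not the repository's implementation: format_openai_response is reconstructed from how the diff calls it, call_upstream is a hypothetical stand-in for the real backend request, and the chunk-by-chunk delta slicing (last_content[len(full_response):]) is omitted for brevity.

import json

def format_openai_response(content, finish_reason=None):
    # Assumed helper, reconstructed from how the diff calls it: wraps a text
    # delta in an OpenAI-style chat.completion.chunk payload.
    return {
        "object": "chat.completion.chunk",
        "choices": [{
            "delta": {"content": content} if content else {},
            "finish_reason": finish_reason,
        }],
    }

def stream_with_continuation(messages, call_upstream):
    # call_upstream(payload) is a hypothetical stand-in for the backend request
    # made in app.py; it is assumed to return a dict shaped like
    # {"text": "...", "finish_reason": "length" | "stop"}.
    while True:
        conversation = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
        conversation += ("\nPlease follow and reply to the user's recent messages "
                         "and avoid answers that summarize the conversation history.")

        response_message = call_upstream({"text": conversation})
        text = response_message.get("text", "")
        finish_reason = response_message.get("finish_reason")

        if finish_reason == "length":
            # Cut off by the length limit: keep the partial answer, ask the
            # model to continue, and loop around for the next request.
            messages.append({"role": "assistant", "content": text})
            messages.append({"role": "user",
                             "content": "Please continue your output and do not repeat the previous content"})
            continue

        # Normal completion: emit the final text, a finish chunk, and [DONE].
        if text:
            yield f"data: {json.dumps(format_openai_response(text))}\n\n"
        yield f"data: {json.dumps(format_openai_response('', finish_reason or 'stop'))}\n\n"
        yield "data: [DONE]\n\n"
        return

A quick way to exercise the sketch is with a fake backend that truncates once before finishing:

replies = iter([
    {"text": "part one", "finish_reason": "length"},
    {"text": "part two", "finish_reason": "stop"},
])
for line in stream_with_continuation([{"role": "user", "content": "hi"}],
                                     lambda payload: next(replies)):
    print(line, end="")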