moriire committed
Commit
cefc820
1 Parent(s): 886ba9c

Update app.py

Files changed (1)
  1. app.py +13 -5
app.py CHANGED
@@ -17,7 +17,15 @@ class GenModel(BaseModel):
     mirostat_mode: int=2
     mirostat_tau: float=4.0
     mirostat_eta: float=1.1
-
+
+class ChatModel(BaseModel):
+    question: list
+    system: str = "You are a helpful medical AI chat assistant. Help as much as you can.Also continuously ask for possible symptoms in order to atat a conclusive ailment or sickness and possible solutions.Remember, response in English."
+    temperature: float = 0.8
+    seed: int = 101
+    mirostat_mode: int=2
+    mirostat_tau: float=4.0
+    mirostat_eta: float=1.1
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
     filename="*q4_0.gguf",
@@ -66,15 +74,15 @@ def health():
 
 # Chat Completion API
 @app.post("/chat/")
-async def chat(gen:GenModel):
+async def chat(chatm:ChatModel):
     try:
         messages=[
-            {"role": "assistant", "content": gen.system},
-            **gen.question
+            {"role": "assistant", "content": chatm.system},
+            **chatm.question
         ]
         st = time()
 
-        messages.append({"role": "user", "content": gen.question})
+        messages.append({"role": "user", "content": chatm.question})
         output = llm_chat.create_chat_completion(
             messages = messages,
             temperature=gen.temperature,
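
Note on the new handler: as committed it will not run. `**chatm.question` inside a list literal is a SyntaxError (`**` unpacking is only valid in dict displays and calls), `temperature=gen.temperature` still references the old `gen` parameter, the list-typed `question` is also appended verbatim as the content of a user message, and the system prompt is sent with the `assistant` role. The sketch below is not the repository's code; it assumes `question` is meant to carry the conversation so far as a list of `{"role": ..., "content": ...}` dicts, and the response shape and error handling are illustrative only.

# Hedged sketch, not the committed app.py. Assumes ChatModel.question holds the
# conversation so far as a list of {"role": ..., "content": ...} dicts.
from time import time

import llama_cpp
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()

# Same model setup as the existing app.
llm_chat = llama_cpp.Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q4_0.gguf",
)


class ChatModel(BaseModel):
    question: list  # prior turns: [{"role": "user", "content": "..."}, ...]
    # Default prompt lightly cleaned up from the committed one (spacing/typos).
    system: str = (
        "You are a helpful medical AI chat assistant. Help as much as you can. "
        "Also, continuously ask for possible symptoms in order to reach a "
        "conclusive ailment or sickness and possible solutions. "
        "Remember, respond in English."
    )
    temperature: float = 0.8
    seed: int = 101
    mirostat_mode: int = 2
    mirostat_tau: float = 4.0
    mirostat_eta: float = 1.1


@app.post("/chat/")
async def chat(chatm: ChatModel):
    try:
        # Use the "system" role for the system prompt; the commit used "assistant".
        messages = [{"role": "system", "content": chatm.system}]
        # extend() (or a single * unpack) splices the prior turns; ** is invalid here.
        messages.extend(chatm.question)

        st = time()
        output = llm_chat.create_chat_completion(
            messages=messages,
            temperature=chatm.temperature,  # was gen.temperature, a stale name
            seed=chatm.seed,
            mirostat_mode=chatm.mirostat_mode,
            mirostat_tau=chatm.mirostat_tau,
            mirostat_eta=chatm.mirostat_eta,
        )
        return {
            "message": output["choices"][0]["message"],
            "elapsed_s": time() - st,
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

With this shape, a client would POST JSON such as {"question": [{"role": "user", "content": "I have a headache and a fever"}]} to /chat/, and the remaining fields fall back to their defaults.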