Commit: Update app/llm.py
File changed: app/llm.py (+3, -3)
@@ -39,8 +39,8 @@ llm_chat = llama_cpp.Llama.from_pretrained(
     #chat_format="llama-2"
 )
 llm_generate = llama_cpp.Llama.from_pretrained(
-    repo_id="
-    filename="
+    repo_id="moriire/healthcare-GGUF",
+    filename="*.gguf",
     tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B"),
     verbose=False,
     n_ctx=4096,
@@ -75,7 +75,7 @@ def health():
 # Chat Completion API
 @llm_router.post("/chat/", tags=["llm"])
 async def chat(chatm:ChatModel):#, user: schemas.BaseUser = fastapi.Depends(current_active_user)):
-
+    chatm.system = chatm.system.format("")#user.email)
     try:
         st = time()
         output = llm_chat.create_chat_completion(