moriire committed on
Commit
35cf77a
1 Parent(s): bbf91e0

Update app/llm.py

Files changed (1)
  1. app/llm.py +2 -2
app/llm.py CHANGED
@@ -32,7 +32,7 @@ class ChatModel(BaseModel):
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-GGUF",
     filename="healthcare-GGUF-unsloth.Q4_K_M.gguf",
-    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-GGUF"),
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-adapter-merged"),
     verbose=False,
     n_ctx=1024,
     n_gpu_layers=0,
@@ -41,7 +41,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-GGUF",
     filename="healthcare-GGUF-unsloth.Q4_K_M.gguf",
-    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-GGUF"),
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-adapter-merged"),
     verbose=False,
     n_ctx=4096,
     n_gpu_layers=0,
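
Below is a minimal sketch, not part of the commit, of how the updated llm_chat instance could be exercised. The loading arguments mirror app/llm.py after this change; the prompt text, max_tokens value, and response handling are illustrative assumptions.

import llama_cpp
import llama_cpp.llama_tokenizer

# Mirrors the loading call in app/llm.py after this commit: GGUF weights from the
# healthcare-GGUF repo, tokenizer pulled from the merged-adapter HF repo.
llm_chat = llama_cpp.Llama.from_pretrained(
    repo_id="moriire/healthcare-GGUF",
    filename="healthcare-GGUF-unsloth.Q4_K_M.gguf",
    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained(
        "moriire/healthcare-ai-adapter-merged"
    ),
    verbose=False,
    n_ctx=1024,
    n_gpu_layers=0,
)

# Illustrative chat call; the message content is an assumption, not from the repo.
response = llm_chat.create_chat_completion(
    messages=[{"role": "user", "content": "What are common symptoms of dehydration?"}],
    max_tokens=128,
)
print(response["choices"][0]["message"]["content"])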