Aspik101 committed on
Commit
97a1535
1 Parent(s): 55dd95f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -3
app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
2
  import random
3
  import time
4
  from ctransformers import AutoModelForCausalLM
 
 
5
 
6
  params = {
7
  "max_new_tokens":512,
@@ -11,8 +13,9 @@ params = {
11
  "stream":True,
12
  "batch_size": 8}
13
 
14
-
15
- llm = AutoModelForCausalLM.from_pretrained("Aspik101/trurl-2-13b-GGML", model_type="llama")
 
16
 
17
  with gr.Blocks() as demo:
18
  chatbot = gr.Chatbot()
@@ -25,7 +28,8 @@ with gr.Blocks() as demo:
25
  def bot(history):
26
  print(history)
27
  #stream = llm(prompt = f"Jesteś AI assystentem. Odpowiadaj po polski. <user>: {history}. <assistant>:", **params)
28
- stream = llm(prompt = f"{history}", **params)
 
29
  history[-1][1] = ""
30
  answer_save = ""
31
  for character in stream:
 
2
  import random
3
  import time
4
  from ctransformers import AutoModelForCausalLM
5
+ from dl_hf_model import dl_hf_model
6
+
7
 
8
  params = {
9
  "max_new_tokens":512,
 
13
  "stream":True,
14
  "batch_size": 8}
15
 
16
+ url = "https://huggingface.co/Aspik101/trurl-2-13b-GGML/blob/main/trurl-2-13b.ggmlv3.q8_0.bin"
17
+ model_loc, file_size = dl_hf_model(url)
18
+ llm = AutoModelForCausalLM.from_pretrained(model_loc, model_type="llama")
19
 
20
  with gr.Blocks() as demo:
21
  chatbot = gr.Chatbot()
 
28
  def bot(history):
29
  print(history)
30
  #stream = llm(prompt = f"Jesteś AI assystentem. Odpowiadaj po polski. <user>: {history}. <assistant>:", **params)
31
+ #stream = llm(prompt = f"{history}", **params)
32
+ stream = llm(prompt = f"Jesteś AI assystentem. Odpowiadaj po polski. {history}.", **params)
33
  history[-1][1] = ""
34
  answer_save = ""
35
  for character in stream: