macc2692 committed on
Commit
b78812a
1 Parent(s): 8a8c000

Changed model to Writer/Palmyra-Fin-70B-32K

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -13,12 +13,14 @@ theme = gr.themes.Base(
13
  font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
14
  )
15
 
16
- tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Experiment26-7B", trust_remote_code=True)
17
- model = AutoModelForCausalLM.from_pretrained(
18
- "yam-peleg/Experiment26-7B",
19
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
20
- trust_remote_code=True,
21
- ).to(device)
 
 
22
  @spaces.GPU(enable_queue=True)
23
  def generate_text(text, temperature, maxLen):
24
  text = text.lstrip().lstrip('<s>').lstrip()
 
13
  font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
14
  )
15
 
16
+ tokenizer = AutoTokenizer.from_pretrained("Writer/Palmyra-Fin-70B-32K")
17
+ model = AutoModelForCausalLM.from_pretrained("Writer/Palmyra-Fin-70B-32K")
18
+ #tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Experiment26-7B", trust_remote_code=True)
19
+ #model = AutoModelForCausalLM.from_pretrained(
20
+ # "yam-peleg/Experiment26-7B",
21
+ # torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
22
+ # trust_remote_code=True,
23
+ #).to(device)
24
  @spaces.GPU(enable_queue=True)
25
  def generate_text(text, temperature, maxLen):
26
  text = text.lstrip().lstrip('<s>').lstrip()