Update app.py
app.py
CHANGED
@@ -16,10 +16,10 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
 def generate_text(question):
-    prompt = f'Q: {question}\nA:
+    prompt = f'Q: {question}\nA:'
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
     tokens = model.generate(input_ids, max_length=22, pad_token_id=tokenizer.eos_token_id)
-    response = tokenizer.decode(tokens[0], skip_special_tokens=
+    response = tokenizer.decode(tokens[0], skip_special_tokens=False)
     return response.split('\nA: ')[-1]
 
 # Gradio Blocks interface
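
For context, here is a minimal sketch of how the patched generate_text could sit in a runnable app.py. Only the function body comes from the diff above; the checkpoint name, the model/tokenizer loading, and the Gradio Blocks layout are assumptions for illustration, not part of this commit.

# Sketch only: checkpoint name, loading code, and Blocks layout are assumed;
# the commit itself touches only generate_text.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # assumed checkpoint, not named in the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def generate_text(question):
    # Build the Q/A prompt and move the token ids to the model's device.
    prompt = f'Q: {question}\nA:'
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    # Greedy generation capped at 22 total tokens (prompt included);
    # pad with the EOS token to avoid the missing-pad-token warning.
    tokens = model.generate(input_ids, max_length=22, pad_token_id=tokenizer.eos_token_id)
    # Keep special tokens in the decoded string, then return only the answer part.
    response = tokenizer.decode(tokens[0], skip_special_tokens=False)
    return response.split('\nA: ')[-1]

# Gradio Blocks interface (layout assumed)
with gr.Blocks() as demo:
    question_box = gr.Textbox(label="Question")
    answer_box = gr.Textbox(label="Answer")
    gr.Button("Generate").click(generate_text, inputs=question_box, outputs=answer_box)

demo.launch()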