Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ import torch
 import spaces
 
 # Load the model and processor
-model = gen_model.VLMForCausalLM.from_pretrained("unum-cloud/uform-gen")
+model = gen_model.VLMForCausalLM.from_pretrained("unum-cloud/uform-gen").to('cuda')
 processor = gen_model.VLMProcessor.from_pretrained("unum-cloud/uform-gen")
 
 @spaces.GPU
@@ -22,7 +22,7 @@ def generate_caption(image, prompt):
         max_new_tokens=128,
         eos_token_id=32001,
         pad_token_id=processor.tokenizer.pad_token_id
-    )
+    )
 
     prompt_len = inputs["input_ids"].shape[1]
     decoded_text = processor.batch_decode(output[:, prompt_len:])[0]