update model init with float16
- README.md +11 -4
- config.json +1 -1
- processing_florence2.py +1 -1
- sample_inference.ipynb +0 -0
README.md CHANGED
```diff
@@ -32,11 +32,15 @@ Use the code below to get started with the model.
 ```python
 import requests
 
+import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM
 
 
-model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
 processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
 
 prompt = "<OD>"
@@ -44,7 +48,7 @@ prompt = "<OD>"
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
 image = Image.open(requests.get(url, stream=True).raw)
 
-inputs = processor(text=prompt, images=image, return_tensors="pt")
+inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
 
 generated_ids = model.generate(
     input_ids=inputs["input_ids"],
@@ -74,11 +78,14 @@ First, let's define a function to run a prompt.
 ```python
 import requests
 
+import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM
 
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
-model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
 processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
 
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
@@ -89,7 +96,7 @@ def run_example(task_prompt, text_input=None):
         prompt = task_prompt
     else:
         prompt = task_prompt + text_input
-    inputs = processor(text=prompt, images=image, return_tensors="pt")
+    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
     generated_ids = model.generate(
         input_ids=inputs["input_ids"],
         pixel_values=inputs["pixel_values"],
```
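Put together, the updated README usage looks roughly like the sketch below. The `generate()` arguments (`max_new_tokens`, `num_beams`) and the `post_process_generation` call are not part of this diff; they are assumed from typical Florence-2 usage and may differ from the actual README.

```python
# Minimal end-to-end sketch of the float16 initialization added in this commit.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True
).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)

prompt = "<OD>"
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)

# Cast inputs to the model's device and dtype so pixel_values match the
# float16 weights on GPU (they stay float32 on CPU).
inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)

generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=1024,  # assumed value; this diff does not touch generation settings
    num_beams=3,          # assumed value
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
parsed_answer = processor.post_process_generation(
    generated_text, task=prompt, image_size=(image.width, image.height)
)
print(parsed_answer)
```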
config.json CHANGED
```diff
@@ -79,7 +79,7 @@
     "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
   },
   "vocab_size": 51289,
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": "4.41.0.dev0",
   "is_encoder_decoder": true
 }
```
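With `torch_dtype` recorded as `float16` in `config.json`, loading the checkpoint with `torch_dtype="auto"` should pick up half precision from the config rather than defaulting to float32. A small sketch:

```python
import torch
from transformers import AutoModelForCausalLM

# "auto" lets from_pretrained derive the dtype from the checkpoint,
# which after this commit records float16 in config.json.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Florence-2-large", torch_dtype="auto", trust_remote_code=True
)
print(model.dtype)  # expected: torch.float16 (note: half precision is slow on CPU)
```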
processing_florence2.py CHANGED
```diff
@@ -324,7 +324,7 @@ class Florence2Processor(ProcessorMixin):
         if task_answer_post_processing_type == 'pure_text':
             final_answer = task_answer
             # remove the special tokens
-            final_answer = final_answer.replace('<s>', '').replace('</s>', '
+            final_answer = final_answer.replace('<s>', '').replace('</s>', '')
         elif task_answer_post_processing_type in ['od', 'description_with_bboxes', 'bboxes']:
             od_instances = task_answer
             bboxes_od = [_od_instance['bbox'] for _od_instance in od_instances]
```
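For `pure_text` tasks, this line strips the `<s>`/`</s>` special tokens from the decoded answer during post-processing. A rough usage sketch; the decoded string and the expected output below are illustrative, not taken from the diff:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)

# Hypothetical decoded output for a captioning prompt, still carrying special tokens.
generated_text = "<s>A green car parked in front of a yellow building.</s>"

# post_process_generation routes pure-text tasks through the replace() calls above,
# so the returned answer should come back without <s>/</s>.
parsed = processor.post_process_generation(
    generated_text, task="<CAPTION>", image_size=(640, 480)
)
print(parsed)  # expected (illustrative): {'<CAPTION>': 'A green car parked in front of a yellow building.'}
```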
sample_inference.ipynb CHANGED
The diff for this file is too large to render. See raw diff.