Spaces: Running on Zero

jadechoghari committed
Commit 4e961a2
Parent(s): 445bd13

add bbox support
Files changed:
- Screenshot 2024-10-24 at 19.50.06.png +3 -0
- Screenshot 2024-10-24 at 19.52.12.png +3 -0
- __pycache__/builder.cpython-310.pyc +0 -0
- __pycache__/conversation.cpython-310.pyc +0 -0
- __pycache__/inference.cpython-310.pyc +0 -0
- __pycache__/mm_utils.cpython-310.pyc +0 -0
- __pycache__/model_UI.cpython-310.pyc +0 -0
- app.py +86 -102
- appv1.py +156 -0
- clipboard.png +3 -0
- eval.json +1 -1
- eval_output.jsonl/0_of_1.jsonl +1 -1
- temp_image.png +3 -0
- Новый проект 1.png +3 -0
- Новый проект 4.png +3 -0
Screenshot 2024-10-24 at 19.50.06.png
ADDED (Git LFS)

Screenshot 2024-10-24 at 19.52.12.png
ADDED (Git LFS)
__pycache__/builder.cpython-310.pyc
CHANGED
Binary files a/__pycache__/builder.cpython-310.pyc and b/__pycache__/builder.cpython-310.pyc differ

__pycache__/conversation.cpython-310.pyc
CHANGED
Binary files a/__pycache__/conversation.cpython-310.pyc and b/__pycache__/conversation.cpython-310.pyc differ

__pycache__/inference.cpython-310.pyc
CHANGED
Binary files a/__pycache__/inference.cpython-310.pyc and b/__pycache__/inference.cpython-310.pyc differ

__pycache__/mm_utils.cpython-310.pyc
CHANGED
Binary files a/__pycache__/mm_utils.cpython-310.pyc and b/__pycache__/mm_utils.cpython-310.pyc differ

__pycache__/model_UI.cpython-310.pyc
CHANGED
Binary files a/__pycache__/model_UI.cpython-310.pyc and b/__pycache__/model_UI.cpython-310.pyc differ
app.py
CHANGED
@@ -2,88 +2,67 @@ import gradio as gr
 from inference import inference_and_run
 import spaces
 import os
-import re
 import shutil
+from PIL import Image
+from gradio_image_prompter import ImagePrompter

 model_name = 'Ferret-UI'
 cur_dir = os.path.dirname(os.path.abspath(__file__))

 @spaces.GPU()
-def inference_with_gradio(chatbot, image, prompt, model_path, box=None, temperature=0.2, top_p=0.7, max_new_tokens=512):
-    dir_path = os.path.dirname(image)
-    # image_path = image
-    # Define the directory where you want to save the image (current directory)
-    filename = os.path.basename(image)
+def inference_with_gradio(chatbot, image_data, prompt, model_path, temperature=0.2, top_p=0.7, max_new_tokens=512):
+    if image_data is None:
+        raise gr.Error("Please upload an image and draw a bounding box if needed.")
+
+    # Handle the image and bounding box data
+    image = image_data["image"]
+    box = None
+    if "points" in image_data and image_data["points"] and len(image_data["points"]) > 0:
+        points = image_data["points"][0]
+        # Convert points to [x1, y1, x2, y2] format
+        box = f"{points[0]}, {points[1]}, {points[3]}, {points[4]}"
+
+    # Convert numpy array to a PIL Image
+    pil_image = Image.fromarray(image)
+
+    # Save the image
+    filename = "temp_image.png"
     dir_path = "./"
-
-    # Create the new path for the file (in the current directory)
     image_path = os.path.join(dir_path, filename)
-    shutil.copy(image, image_path)
-    print("filename path: ", filename)
+    pil_image.save(image_path)  # Save the PIL image to the file system
+
     if "gemma" in model_path.lower():
         conv_mode = "ferret_gemma_instruct"
     else:
         conv_mode = "ferret_llama_3"

-    # inference_text = inference_and_run(
-    #     image_path=image_path,
-    #     prompt=prompt,
-    #     conv_mode=conv_mode,
-    #     model_path=model_path,
-    #     box=box
-    # )
+    print("the box: ", box)
+    # Call the main inference function with the model and mask (if applicable)
     inference_text = inference_and_run(
-        image_path=filename, # double check this
+        image_path=filename,
         image_dir=dir_path,
         prompt=prompt,
-        model_path="jadechoghari/Ferret-UI-Gemma2b",
+        model_path=model_path,
         conv_mode=conv_mode,
         temperature=temperature,
         top_p=top_p,
         box=box,
         max_new_tokens=max_new_tokens,
-        # stop=stop # Assuming we want to process the image
-    )
+    )

-    # print("done, now appending", inference_text)
-    # chatbot.append((prompt, inference_text))
-    # return chatbot
-    # Convert inference_text to string if it's not already
     if isinstance(inference_text, (list, tuple)):
         inference_text = str(inference_text[0])
-
-    # Update chatbot history
+
+    # Update chatbot history
     new_history = chatbot.copy() if chatbot else []
     new_history.append((prompt, inference_text))
     return new_history

 def submit_chat(chatbot, text_input):
-    response = ''
-    # chatbot.append((text_input, response))
     return chatbot, ''

 def clear_chat():
-    return [], None, "", "", 0.2, 0.7, 512
-
-# with open(f"{cur_dir}/logo.svg", "r", encoding="utf-8") as svg_file:
-#     svg_content = svg_file.read()
-# font_size = "2.5em"
-# svg_content = re.sub(r'(<svg[^>]*)(>)', rf'\1 height="{font_size}" style="vertical-align: middle; display: inline-block;"\2', svg_content)
-# html = f"""
-# <p align="center" style="font-size: {font_size}; line-height: 1;">
-#    <span style="display: inline-block; vertical-align: middle;">{svg_content}</span>
-#    <span style="display: inline-block; vertical-align: middle;">{model_name}</span>
-# </p>
-# <center><font size=3><b>{model_name}</b> Demo: Upload an image, provide a prompt, and get insights using advanced AI models. <a href='https://huggingface.co/jadechoghari/Ferret-UI-Gemma2b'>😊 Huggingface</a></font></center>
-# """
-
-# html = f"""
-# <p align="center">
-#     <img src='data:image/png;base64,{image_data.encode("base64").decode("utf-8")}' alt='Ferret-UI' style='width: 100px; vertical-align: middle; border-radius: 15px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.1);'/>
-#     <span style="font-size: 2em; font-weight: bold; margin-left: 10px; vertical-align: middle;">{model_name}</span>
-# </p>
-# <center><font size=3><b>{model_name}</b> Demo: Upload an image, provide a prompt, and get insights using advanced AI models. <a href='https://huggingface.co/jadechoghari/Ferret-UI-Gemma2b'>😊 Huggingface</a></font></center>
-# """
+    return [], None, "", 0.2, 0.7, 512

 html = f"""
 <div style="text-align: center; padding: 20px;">
@@ -93,7 +72,7 @@ html = f"""
                 style='width: 80px; height: 80px; border-radius: 20px; box-shadow: 0px 8px 16px rgba(0, 0, 0, 0.2);'/>
             <div style="margin-left: 15px;">
                 <h1 style="font-size: 2.8em; font-family: -apple-system, BlinkMacSystemFont, sans-serif; color: #1D1D1F;
-                        font-weight: bold; margin-bottom: 0;"
+                        font-weight: bold; margin-bottom: 0;"> {model_name}</h1>
                 <p style="font-size: 1.2em; color: #6e6e73; font-family: -apple-system, BlinkMacSystemFont, sans-serif; margin-top: 5px;">
                     📱 Grounded Mobile UI Understanding with Multimodal LLMs.<br>
                     A new MLLM tailored for enhanced understanding of mobile UI screens, equipped with referring, grounding, and reasoning capabilities.
@@ -113,68 +92,73 @@ html = f"""
 """

 latex_delimiters_set = [{
-    "left": "\\(",
-    "right": "\\)",
-    "display": False
-}, {
-    "left": "\\begin{equation}",
-    "right": "\\end{equation}",
-    "display": True
-}, {
-    "left": "\\begin{align}",
-    "right": "\\end{align}",
-    "display": True
-}]
-
-# Set up UI components
-image_input = gr.Image(label="Upload Image", type="filepath", height=350)
-text_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
-model_dropdown = gr.Dropdown(choices=[
-    "jadechoghari/Ferret-UI-Gemma2b",
-    "jadechoghari/Ferret-UI-Llama8b",
-], label="Model Path", value="jadechoghari/Ferret-UI-Gemma2b")
-
-bounding_box_input = gr.Textbox(placeholder="Optional bounding box (x1, y1, x2, y2)", label="Bounding Box (optional)")
-# Adding Sliders for temperature, top_p, and max_new_tokens
-temperature_input = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=0.2, label="Temperature")
-top_p_input = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label="Top P")
-max_new_tokens_input = gr.Slider(minimum=1, maximum=1024, step=1, value=512, label="Max New Tokens")
-
-
-chatbot = gr.Chatbot(label="Chat with Ferret-UI", height=400, show_copy_button=True, latex_delimiters=latex_delimiters_set, type="tuples")
-
-with gr.Blocks(title=model_name, theme=gr.themes.Ocean()) as demo:
+    "left": "\\(",
+    "right": "\\)",
+    "display": False
+}, {
+    "left": "\\begin{equation}",
+    "right": "\\end{equation}",
+    "display": True
+}, {
+    "left": "\\begin{align}",
+    "right": "\\end{align}",
+    "display": True
+}]
+
+with gr.Blocks(title=model_name) as demo:
     gr.HTML(html)
     with gr.Row():
         with gr.Column(scale=3):
-            image_input.render()
-            text_input.render()
-            model_dropdown.render()
-            bounding_box_input.render()
-            temperature_input.render() # Render temperature input
-            top_p_input.render() # Render top_p input
-            max_new_tokens_input.render()
-            gr.Examples(
-                examples=[
-                    ["appstore_reminders.png", "Describe the image in details", "jadechoghari/Ferret-UI-Gemma2b", None],
-                    ["appstore_reminders.png", "What's inside the selected region?", "jadechoghari/Ferret-UI-Gemma2b", "189, 906, 404, 970"],
-                    ["appstore_reminders.png", "Where is the Game Tab?", "jadechoghari/Ferret-UI-Gemma2b", None],
-                ],
-                inputs=[image_input, text_input, model_dropdown, bounding_box_input]
-            )
+            # Replace image_input with ImagePrompter
+            image_input = ImagePrompter(label="Upload Image & Draw Bounding Box")
+            text_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
+            model_dropdown = gr.Dropdown(
+                choices=[
+                    "jadechoghari/Ferret-UI-Gemma2b",
+                    "jadechoghari/Ferret-UI-Llama8b",
+                ],
+                label="Model Path",
+                value="jadechoghari/Ferret-UI-Gemma2b"
+            )
+            temperature_input = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=0.2, label="Temperature")
+            top_p_input = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label="Top P")
+            max_new_tokens_input = gr.Slider(minimum=1, maximum=1024, step=1, value=512, label="Max New Tokens")
+
        with gr.Column(scale=7):
-            chatbot.render()
+            chatbot = gr.Chatbot(
+                label="Chat with Ferret-UI",
+                height=400,
+                show_copy_button=True,
+                latex_delimiters=latex_delimiters_set,
+                type="tuples"
+            )
     with gr.Row():
         send_btn = gr.Button("Send", variant="primary")
         clear_btn = gr.Button("Clear", variant="secondary")

     send_click_event = send_btn.click(
-        inference_with_gradio,
-        [chatbot, image_input, text_input, model_dropdown, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input], chatbot).then(submit_chat, [chatbot, text_input], [chatbot, text_input])
+        inference_with_gradio,
+        [chatbot, image_input, text_input, model_dropdown, temperature_input, top_p_input, max_new_tokens_input],
+        chatbot
+    ).then(
+        submit_chat,
+        [chatbot, text_input],
+        [chatbot, text_input]
+    )
+
     submit_event = text_input.submit(
-        inference_with_gradio,
-        [chatbot, image_input, text_input, model_dropdown, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input], chatbot).then(submit_chat, [chatbot, text_input], [chatbot, text_input])
+        inference_with_gradio,
+        [chatbot, image_input, text_input, model_dropdown, temperature_input, top_p_input, max_new_tokens_input],
+        chatbot
+    ).then(
+        submit_chat,
+        [chatbot, text_input],
+        [chatbot, text_input]
+    )

-    clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input])
+    clear_btn.click(
+        clear_chat,
+        outputs=[chatbot, image_input, text_input, temperature_input, top_p_input, max_new_tokens_input]
+    )

-demo.launch()
+demo.launch()
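Note on the bounding-box format: the new handler assumes gradio_image_prompter returns a dict with an "image" array plus a "points" list, where each drawn box arrives as a flat sextuple (x1, y1, flag, x2, y2, flag), so the corner coordinates sit at indices 0, 1, 3 and 4. A minimal standalone sketch of that conversion (the helper name and the flag values are illustrative, not part of the commit):

def points_to_box(points):
    """Turn the first ImagePrompter box row into an 'x1, y1, x2, y2' string."""
    if not points:
        return None  # no region drawn
    p = points[0]  # the handler only reads the first drawn region
    return f"{p[0]}, {p[1]}, {p[3]}, {p[4]}"

# Example: a box dragged from (189, 906) to (404, 970)
assert points_to_box([[189, 906, 2.0, 404, 970, 3.0]]) == "189, 906, 404, 970"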
appv1.py
ADDED
@@ -0,0 +1,156 @@
+import gradio as gr
+from inference import inference_and_run
+import spaces
+import os
+import re
+import shutil
+
+model_name = 'Ferret-UI'
+cur_dir = os.path.dirname(os.path.abspath(__file__))
+
+@spaces.GPU()
+def inference_with_gradio(chatbot, image, prompt, model_path, box=None, temperature=0.2, top_p=0.7, max_new_tokens=512):
+    dir_path = os.path.dirname(image)
+    # image_path = image
+    # Define the directory where you want to save the image (current directory)
+    filename = os.path.basename(image)
+    dir_path = "./"
+
+    # Create the new path for the file (in the current directory)
+    image_path = os.path.join(dir_path, filename)
+    shutil.copy(image, image_path)
+    print("filename path: ", filename)
+    if "gemma" in model_path.lower():
+        conv_mode = "ferret_gemma_instruct"
+    else:
+        conv_mode = "ferret_llama_3"
+
+    # inference_text = inference_and_run(
+    #     image_path=image_path,
+    #     prompt=prompt,
+    #     conv_mode=conv_mode,
+    #     model_path=model_path,
+    #     box=box
+    # )
+    inference_text = inference_and_run(
+        image_path=filename, # double check this
+        image_dir=dir_path,
+        prompt=prompt,
+        model_path="jadechoghari/Ferret-UI-Gemma2b",
+        conv_mode=conv_mode,
+        temperature=temperature,
+        top_p=top_p,
+        box=box,
+        max_new_tokens=max_new_tokens,
+        # stop=stop # Assuming we want to process the image
+    )
+    if isinstance(inference_text, (list, tuple)):
+        inference_text = str(inference_text[0])
+
+    # Update chatbot history with new message pair
+    new_history = chatbot.copy() if chatbot else []
+    new_history.append((prompt, inference_text))
+    return new_history
+
+def submit_chat(chatbot, text_input):
+    response = ''
+    # chatbot.append((text_input, response))
+    return chatbot, ''
+
+def clear_chat():
+    return [], None, "", "", 0.2, 0.7, 512
+
+
+html = f"""
+<div style="text-align: center; padding: 20px;">
+    <div style="display: inline-block; background-color: #f5f5f7; padding: 20px; border-radius: 20px; box-shadow: 0px 6px 20px rgba(0, 0, 0, 0.1);">
+        <div style="display: flex; align-items: center;">
+            <img src='https://github.com/apple/ml-ferret/blob/main/ferretui/figs/ferretui_icon.png?raw=true' alt='Ferret-UI'
+                style='width: 80px; height: 80px; border-radius: 20px; box-shadow: 0px 8px 16px rgba(0, 0, 0, 0.2);'/>
+            <div style="margin-left: 15px;">
+                <h1 style="font-size: 2.8em; font-family: -apple-system, BlinkMacSystemFont, sans-serif; color: #1D1D1F;
+                        font-weight: bold; margin-bottom: 0;"> {model_name}</h1>
+                <p style="font-size: 1.2em; color: #6e6e73; font-family: -apple-system, BlinkMacSystemFont, sans-serif; margin-top: 5px;">
+                    📱 Grounded Mobile UI Understanding with Multimodal LLMs.<br>
+                    A new MLLM tailored for enhanced understanding of mobile UI screens, equipped with referring, grounding, and reasoning capabilities.
+                </p>
+                <a href='https://huggingface.co/jadechoghari/Ferret-UI-Gemma2b' style='text-decoration: none;'>
+                    <button style="background-color: #007aff; color: white; font-size: 1.2em; padding: 10px 20px; border-radius: 10px; border: none; margin-top: 10px; box-shadow: 0px 4px 12px rgba(0, 122, 255, 0.4); cursor: pointer;">
+                        🤗 Try on Hugging Face
+                    </button>
+                </a>
+            </div>
+        </div>
+    </div>
+    <p style="font-size: 1.2em; color: #86868B; font-family: -apple-system, BlinkMacSystemFont, sans-serif; margin-top: 30px;">
+        We release two Ferret-UI checkpoints, built on gemma-2b and Llama-3-8B models respectively, for public exploration. 🚀
+    </p>
+</div>
+"""
+
+latex_delimiters_set = [{
+    "left": "\\(",
+    "right": "\\)",
+    "display": False
+}, {
+    "left": "\\begin{equation}",
+    "right": "\\end{equation}",
+    "display": True
+}, {
+    "left": "\\begin{align}",
+    "right": "\\end{align}",
+    "display": True
+}]
+
+# Set up UI components
+image_input = gr.Image(label="Upload Image", type="filepath", height=350)
+text_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
+model_dropdown = gr.Dropdown(choices=[
+    "jadechoghari/Ferret-UI-Gemma2b",
+    "jadechoghari/Ferret-UI-Llama8b",
+], label="Model Path", value="jadechoghari/Ferret-UI-Gemma2b")
+
+bounding_box_input = gr.Textbox(placeholder="Optional bounding box (x1, y1, x2, y2)", label="Bounding Box (optional)")
+# Adding Sliders for temperature, top_p, and max_new_tokens
+temperature_input = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=0.2, label="Temperature")
+top_p_input = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label="Top P")
+max_new_tokens_input = gr.Slider(minimum=1, maximum=1024, step=1, value=512, label="Max New Tokens")
+
+
+chatbot = gr.Chatbot(label="Chat with Ferret-UI", height=400, show_copy_button=True, latex_delimiters=latex_delimiters_set, type="tuples")
+
+with gr.Blocks(title=model_name, theme=gr.themes.Ocean()) as demo:
+    gr.HTML(html)
+    with gr.Row():
+        with gr.Column(scale=3):
+            image_input.render()
+            text_input.render()
+            model_dropdown.render()
+            bounding_box_input.render()
+            temperature_input.render() # Render temperature input
+            top_p_input.render() # Render top_p input
+            max_new_tokens_input.render()
+            gr.Examples(
+                examples=[
+                    ["appstore_reminders.png", "Describe the image in details", "jadechoghari/Ferret-UI-Gemma2b", None],
+                    ["appstore_reminders.png", "What's inside the selected region?", "jadechoghari/Ferret-UI-Gemma2b", "189, 906, 404, 970"],
+                    ["appstore_reminders.png", "Where is the Game Tab?", "jadechoghari/Ferret-UI-Gemma2b", None],
+                ],
+                inputs=[image_input, text_input, model_dropdown, bounding_box_input]
+            )
+        with gr.Column(scale=7):
+            chatbot.render()
+    with gr.Row():
+        send_btn = gr.Button("Send", variant="primary")
+        clear_btn = gr.Button("Clear", variant="secondary")
+
+    send_click_event = send_btn.click(
+        inference_with_gradio, [chatbot, image_input, text_input, model_dropdown, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input], chatbot
+    ).then(submit_chat, [chatbot, text_input], [chatbot, text_input])
+    submit_event = text_input.submit(
+        inference_with_gradio, [chatbot, image_input, text_input, model_dropdown, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input], chatbot
+    ).then(submit_chat, [chatbot, text_input], [chatbot, text_input])
+
+    clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input, bounding_box_input, temperature_input, top_p_input, max_new_tokens_input])
+
+demo.launch()
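appv1.py preserves the earlier flow, in which the bounding box arrives as free text in the "x1, y1, x2, y2" format and is forwarded to inference_and_run verbatim. A small validation sketch for that format (a hypothetical helper; the committed code passes the raw string through unchecked):

def parse_box_string(text):
    """Parse an optional 'x1, y1, x2, y2' textbox value into four floats."""
    if not text or not text.strip():
        return None  # the box is optional
    parts = [p.strip() for p in text.split(",")]
    if len(parts) != 4:
        raise ValueError("expected exactly four comma-separated numbers")
    x1, y1, x2, y2 = map(float, parts)
    return (x1, y1, x2, y2)

# Example taken from the bundled demo row:
assert parse_box_string("189, 906, 404, 970") == (189.0, 906.0, 404.0, 970.0)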
clipboard.png
ADDED (Git LFS)
eval.json
CHANGED
@@ -1 +1 @@
-[{"id": 0, "image": "
+[{"id": 0, "image": "temp_image.png", "image_h": 2532, "image_w": 1170, "conversations": [{"from": "human", "value": "<image>\nclassify this"}], "box_x1y1x2y2": [["455.0, 513.0, 729.0, 650.0"]]}]
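The committed eval.json shows the per-request record the demo serializes: image path and dimensions, one conversation turn, and the optional box. A sketch of writing such a record (field values copied from the committed file; where exactly the writer lives, presumably inside inference_and_run, is not shown in this diff):

import json

record = {
    "id": 0,
    "image": "temp_image.png",
    "image_h": 2532,
    "image_w": 1170,
    "conversations": [{"from": "human", "value": "<image>\nclassify this"}],
    "box_x1y1x2y2": [["455.0, 513.0, 729.0, 650.0"]],
}
with open("eval.json", "w") as f:
    json.dump([record], f)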
eval_output.jsonl/0_of_1.jsonl
CHANGED
@@ -1 +1 @@
-{"id": 0, "image_path": "
+{"id": 0, "image_path": "temp_image.png", "prompt": "classify this", "text": "Today, 4+", "label": null}
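The single-shard output pairs each input id with the generated text. A minimal sketch of consuming it, assuming one JSON object per line as in the committed shard:

import json

with open("eval_output.jsonl/0_of_1.jsonl") as f:
    for line in f:
        result = json.loads(line)
        print(result["id"], result["text"])  # e.g. 0 Today, 4+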
temp_image.png
ADDED (Git LFS)

Новый проект 1.png
ADDED (Git LFS)

Новый проект 4.png
ADDED (Git LFS)