chore: update description
app.py CHANGED
@@ -12,16 +12,35 @@ import gradio as gr
 
 from modeling_siglip import SiglipForImageClassification
 
-MODEL_NAME = "
+MODEL_NAME = os.environ["MODEL_NAME"]
 PROCESSOR_NAME = MODEL_NAME
 HF_TOKEN = os.environ["HF_READ_TOKEN"]
 
 EXAMPLES = [["./images/sample.jpg"], ["./images/sample2.webp"]]
 
+README_MD = """\
+## SigLIP Tagger Test 3
+An experimental model for tagging danbooru tags of images using SigLIP.
+
+Model(s):
+- [p1atdev/siglip-tagger-test-3](https://huggingface.co/p1atdev/siglip-tagger-test-3)
+
+Example images by NovelAI and niji・journey.
+"""
+
 model = SiglipForImageClassification.from_pretrained(MODEL_NAME, token=HF_TOKEN)
-# model = torch.compile(model)
 processor = AutoImageProcessor.from_pretrained(PROCESSOR_NAME, token=HF_TOKEN)
 
+try:
+    print("torch.compile")
+    model = torch.compile(model)
+    # warmup
+    print("warming up...")
+    model(**processor(Image.open(EXAMPLES[0][0]), return_tensors="pt"))
+    print("done")
+except:
+    print("torch.compile not supported")
+
 
 def compose_text(results: dict[str, float], threshold: float = 0.3):
     return ", ".join(
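The try/except added in this hunk compiles the model at startup and then pushes one example image through it, so the compilation cost is paid before the first user request (torch.compile is lazy and only compiles on the first forward pass). Below is a minimal sketch of the same pattern, using a hypothetical compile_with_warmup helper and a narrower `except Exception` in place of the hunk's bare `except:`, which would also swallow KeyboardInterrupt and SystemExit:

```python
import torch
from PIL import Image

def compile_with_warmup(model, processor, sample_path: str):
    """Compile `model` if the backend supports it, then warm it up once."""
    try:
        compiled = torch.compile(model)
        # torch.compile is lazy: compilation is triggered by the first
        # forward pass, so run one example now instead of making the
        # first user request pay for it.
        with torch.no_grad():
            compiled(**processor(Image.open(sample_path), return_tensors="pt"))
        return compiled
    except Exception as exc:  # narrower than a bare `except:`
        print(f"torch.compile unavailable, falling back to eager: {exc}")
        return model
```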
@@ -65,18 +84,7 @@ css = """\
 
 def demo():
     with gr.Blocks(css=css) as ui:
-        gr.Markdown(
-            """\
-## SigLIP Tagger Test 3
-An experimental model for tagging danbooru tags of images using SigLIP.
-
-Models:
-- (soon)
-
-Example images by NovelAI and niji・journey.
-
-            """
-        )
+        gr.Markdown(README_MD)
 
     with gr.Row():
         with gr.Column():
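The second hunk replaces the inline description with the README_MD constant defined at module level, so the text lives in one place. A self-contained sketch of the pattern; whether demo() returns the Blocks object and how the Space launches it are not visible in this diff, so the return and launch() call below are assumptions:

```python
import gradio as gr

README_MD = """\
## SigLIP Tagger Test 3
An experimental model for tagging danbooru tags of images using SigLIP.
"""

def demo():
    with gr.Blocks() as ui:
        gr.Markdown(README_MD)  # render the shared description once
    return ui

if __name__ == "__main__":
    demo().launch()
```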