yjernite committed
Commit • 190395c
1 Parent(s): 2f582d1
context

app.py CHANGED

@@ -6,7 +6,37 @@ import plotly.graph_objects as go
 import plotly.express as px
 import operator

-TITLE = "
+TITLE = "Identity Representation in Diffusion Models"
+
+_INTRO = """
+# Identity Representation in Diffusion Models
+
+Explore the data generated from [DiffusionBiasExplorer](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer)!
+This demo showcases patterns in the images generated from different prompts input to Stable Diffusion and DALL-E 2 systems.
+"""
+
+_CONTEXT = """
+##### How do diffusion-based models represent gender and ethnicity?
+
+In order to evaluate the *social biases* that Text-to-Image (TTI) systems may reproduce or exacerbate,
+we need to first understand how the visual representations they generate relate to notions of gender and ethnicity.
+These two aspects of a person's identity, however, are known as **socially constructed characteristics**:
+that is to say, gender and ethnicity only exist in interactions between people; they do not have an independent existence based solely on physical (or visual) attributes.
+This means that while we can characterize trends in how the models associate visual features with specific *identity terms in the generation prompts*,
+we should not assign a specific gender or ethnicity to a synthetic figure generated by an ML model.
+
+In this app, we instead take a 2-step clustering-based approach. First, we generate 680 images for each model by varying mentions of terms that denote gender or ethnicity in the prompts.
+Then, we use a [VQA-based model](https://huggingface.co/Salesforce/blip-vqa-base) to cluster these images at different granularities (12, 24, or 48 clusters).
+Exploring these clusters allows us to examine trends in the models' associations between visual features and textual representations of social features.
+We encourage users to take advantage of this app to explore those trends, for example through the lens of the following questions:
+- Find the cluster that has the most prompts denoting a gender or ethnicity that you identify with. Do you think the generated images look like you?
+- Find two clusters that have a similar distribution of gender terms but different distributions of ethnicity terms. Do you see any meaningful differences in how gender is visually represented?
+- Do you find that some ethnicity terms lead to more stereotypical visual representations than others?
+- Do you find that some gender terms lead to more stereotypical visual representations than others?
+
+These questions only scratch the surface of what we can learn from demos like this one. Let us know what you find [in the discussions tab](https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering/discussions), or if you think of other relevant questions!
+"""
+
 clusters_12 = json.load(open("clusters/id_all_blip_clusters_12.json"))
 clusters_24 = json.load(open("clusters/id_all_blip_clusters_24.json"))
 clusters_48 = json.load(open("clusters/id_all_blip_clusters_48.json"))
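
The `_CONTEXT` text added above describes the two-step approach behind these files: 680 images are generated per model from prompts that vary gender and ethnicity terms, and a BLIP VQA-based model is then used to cluster them at 12, 24, or 48 clusters. The three JSON files loaded at the end of the hunk hold those precomputed clusterings. Below is a minimal sketch (not code from this commit) of how the granularities and the `dropdown_descs` labels used later could be assembled; the dict layout and label text are illustrative assumptions.

```python
import json

# Sketch only: gather the three precomputed clusterings in one dict keyed by granularity,
# so the rest of the app can look them up by the selected number of clusters.
clusters_by_granularity = {
    n: json.load(open(f"clusters/id_all_blip_clusters_{n}.json"))
    for n in (12, 24, 48)
}

# Hypothetical stand-in for the dropdown_descs mapping used later in app.py:
# one human-readable label per cluster id. The real labels are presumably derived
# from each cluster's contents (prompt terms, image counts, etc.).
dropdown_descs = {
    n: [f"Cluster {i} of {n}" for i in range(n)]
    for n in (12, 24, 48)
}
```
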
@@ -180,13 +210,11 @@ def show_cluster(cl_id, num_clusters):


 with gr.Blocks(title=TITLE) as demo:
-    gr.Markdown(
-        gr.
-        "
-    )
-
-        "See the results on how the images from different prompts cluster together below."
-    )
+    gr.Markdown(_INTRO)
+    with gr.Accordion(
+        "How do diffusion-based models represent gender and ethnicity?", open=True
+    ):
+        gr.Markdown(_CONTEXT)
     gr.HTML(
         """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image systems and may depict offensive stereotypes or contain explicit content.</span>"""
     )
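
The second hunk wires the new strings into the UI: `_INTRO` is rendered as always-visible Markdown, while the longer `_CONTEXT` sits inside a collapsible `gr.Accordion` that starts open. A standalone, runnable sketch of that pattern, with placeholder strings, looks roughly like this:

```python
import gradio as gr

# Placeholder strings; the real _INTRO and _CONTEXT are the markdown blocks added above.
_INTRO = "# Identity Representation in Diffusion Models"
_CONTEXT = "Longer background text shown inside the collapsible section."

with gr.Blocks(title="Identity Representation in Diffusion Models") as demo:
    gr.Markdown(_INTRO)  # always-visible introduction
    # Collapsible panel, expanded by default, so the explanation is shown but can be folded away.
    with gr.Accordion(
        "How do diffusion-based models represent gender and ethnicity?", open=True
    ):
        gr.Markdown(_CONTEXT)

if __name__ == "__main__":
    demo.launch()
```
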
@@ -199,9 +227,7 @@ with gr.Blocks(title=TITLE) as demo:
     with gr.Row():
         with gr.Column():
             cluster_id = gr.Dropdown(
-                choices=dropdown_descs[
-                    num_clusters.value
-                ], # [i for i in range(num_clusters.value)],
+                choices=dropdown_descs[num_clusters.value],
                 value=0,
                 label="Select cluster to visualize:",
             )
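
The last hunk collapses the dropdown's `choices` argument to a single line; `dropdown_descs[num_clusters.value]` only fixes the initial choices, so if the app lets users switch between 12, 24, and 48 clusters, the dropdown presumably needs to be refreshed when that control changes. A minimal sketch of that wiring, assuming Gradio 3.x-style component updates and the hypothetical `dropdown_descs` from above (none of this is taken from the commit):

```python
import gradio as gr

# Hypothetical labels per granularity; app.py builds its own dropdown_descs.
dropdown_descs = {n: [f"Cluster {i} of {n}" for i in range(n)] for n in (12, 24, 48)}

with gr.Blocks() as demo:
    num_clusters = gr.Radio([12, 24, 48], value=12, label="How many clusters?")
    cluster_id = gr.Dropdown(
        choices=dropdown_descs[12],
        value=dropdown_descs[12][0],
        label="Select cluster to visualize:",
    )

    def refresh_choices(n):
        # Swap in the choices for the newly selected granularity (Gradio 3.x update API).
        return gr.Dropdown.update(choices=dropdown_descs[n], value=dropdown_descs[n][0])

    num_clusters.change(refresh_choices, inputs=num_clusters, outputs=cluster_id)

if __name__ == "__main__":
    demo.launch()
```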
|