Update app.py
app.py CHANGED
```diff
@@ -27,13 +27,13 @@ def infer(prompt_part1, prompt_part2, prompt_part3, prompt_part4, prompt_part5,
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
-        prompt
-        negative_prompt
-        guidance_scale
-        num_inference_steps
-        width
-        height
-        generator
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator
     ).images[0]
 
     return image
@@ -44,7 +44,7 @@ examples = [
     "A delicious ceviche cheesecake slice",
 ]
 
-css="""
+css = """
 #col-container {
     margin: 0 auto;
     max-width: 520px;
@@ -66,10 +66,17 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Row():
 
-            prompt_part1 =
-
+            prompt_part1 = gr.Textbox(
+                value="a single",
+                label="Prompt Part 1",
+                show_label=False,
+                interactive=False,
+                container=False,
+                elem_id="prompt_part1"
+            )
+            gr.Markdown(f"**Prompt Part 1:** a single")
 
-            prompt_part2 = gr.
+            prompt_part2 = gr.Textbox(
                 label="Prompt Part 2",
                 show_label=False,
                 max_lines=1,
@@ -77,7 +84,7 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
 
-            prompt_part3 = gr.
+            prompt_part3 = gr.Textbox(
                 label="Prompt Part 3",
                 show_label=False,
                 max_lines=1,
@@ -85,7 +92,7 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
 
-            prompt_part4 = gr.
+            prompt_part4 = gr.Textbox(
                 label="Prompt Part 4",
                 show_label=False,
                 max_lines=1,
@@ -93,8 +100,15 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
 
-            prompt_part5 =
-
+            prompt_part5 = gr.Textbox(
+                value="hanging on the plain grey wall",
+                label="Prompt Part 5",
+                show_label=False,
+                interactive=False,
+                container=False,
+                elem_id="prompt_part5"
+            )
+            gr.Markdown(f"**Prompt Part 5:** hanging on the plain grey wall")
 
             run_button = gr.Button("Run", scale=0)
 
@@ -102,7 +116,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Accordion("Advanced Settings", open=False):
 
-        negative_prompt = gr.
+        negative_prompt = gr.Textbox(
             label="Negative prompt",
             max_lines=1,
             placeholder="Enter a negative prompt",
@@ -156,14 +170,14 @@ with gr.Blocks(css=css) as demo:
         )
 
         gr.Examples(
-            examples
-            inputs
+            examples=examples,
+            inputs=[prompt_part2]
         )
 
     run_button.click(
-        fn
-        inputs
-        outputs
+        fn=infer,
+        inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result]
     )
 
 demo.queue().launch()
```
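The fixed `pipe(...)` call depends on a `prompt` variable whose construction never appears in this diff; presumably `infer` joins the five textbox values into one string first. A minimal sketch of how the surrounding function plausibly looks — the f-string join, the `randomize_seed` handling, and the commented-out pipeline load are assumptions, not shown in this commit; the parameter order is taken from the `run_button.click` inputs list:

```python
import random

import torch
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("<model-id>")  # model id not shown in this commit

MAX_SEED = 2**32 - 1  # assumed upper bound for the seed control


def infer(prompt_part1, prompt_part2, prompt_part3, prompt_part4, prompt_part5,
          negative_prompt, seed, randomize_seed, width, height,
          guidance_scale, num_inference_steps):
    if randomize_seed:  # assumed: standard template behaviour for the checkbox
        seed = random.randint(0, MAX_SEED)

    # Assumption: the five UI parts are concatenated into the single prompt
    # that the fixed pipe(...) call below consumes.
    prompt = f"{prompt_part1} {prompt_part2} {prompt_part3} {prompt_part4} {prompt_part5}"

    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image
```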
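One wiring detail the diff fixes is worth noting: `run_button.click` feeds components into `infer` positionally, so the `inputs` list must match the function signature exactly, and `gr.Examples` here populates only the editable middle textbox while parts 1 and 5 stay pinned via `interactive=False`. A stripped-down, runnable sketch of the same pattern — component names follow the diff, everything else is illustrative:

```python
import gradio as gr


def infer(prompt_part1, prompt_part2):
    # Illustrative two-part stand-in for the five-part prompt assembly.
    return f"{prompt_part1} {prompt_part2}"


with gr.Blocks() as demo:
    with gr.Row():
        # Fixed prefix: visible but not editable, as in the commit.
        prompt_part1 = gr.Textbox(value="a single", interactive=False,
                                  show_label=False, container=False)
        # User-editable part, the only one gr.Examples fills in.
        prompt_part2 = gr.Textbox(show_label=False, max_lines=1, container=False)
        run_button = gr.Button("Run", scale=0)
    result = gr.Textbox(show_label=False)

    # Example values target only the editable component.
    gr.Examples(examples=[["ceviche cheesecake slice"]], inputs=[prompt_part2])

    # inputs order must line up with infer's positional parameters.
    run_button.click(fn=infer, inputs=[prompt_part1, prompt_part2], outputs=[result])

demo.queue().launch()
```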