Update app.py

app.py CHANGED
@@ -125,7 +125,9 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale
 def update_scales(x,y,prompt,seed, steps, guidance_scale,
                   avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2,
                   img2img_type = None, img = None,
-                  controlnet_scale= None, ip_adapter_scale=None):
+                  controlnet_scale= None, ip_adapter_scale=None,
+                  edit_threshold=None, edit_guidance_scale = None,
+                  init_latents=None, zs=None):
     avg_diff = (avg_diff_x_1.cuda(), avg_diff_x_2.cuda())
     avg_diff_2nd = (avg_diff_y_1.cuda(), avg_diff_y_2.cuda())
     if img2img_type=="controlnet canny" and img is not None:
@@ -133,6 +135,8 @@ def update_scales(x,y,prompt,seed, steps, guidance_scale,
         image = clip_slider.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     elif img2img_type=="ip adapter" and img is not None:
         image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
+    elif img2img_type=="inversion":
+        image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1), init_latents = init_latents, zs=zs)
     else:
         image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     return image
@@ -331,7 +335,7 @@ with gr.Blocks(css=css) as demo:
 
     image_inv.change(fn=reset_do_inversion, outputs=[do_inversion]).then(fn=invert, inputs=[image_inv], outputs=[init_latents,zs])
     submit_inv.click(fn=generate,
-                     inputs=[slider_x_inv, slider_y_inv, prompt_inv, seed_inv, iterations_inv, steps_inv, guidance_scale_inv, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
+                     inputs=[slider_x_inv, slider_y_inv, prompt_inv, seed_inv, iterations_inv, steps_inv, guidance_scale_inv, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, img2img_type_inv, image, controlnet_conditioning_scale, ip_adapter_scale ,edit_threshold, edit_guidance_scale, init_latents, zs],
                      outputs=[x_inv, y_inv, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, output_image_inv])
 
     generate_butt.click(fn=update_scales, inputs=[x,y, prompt, seed, steps, guidance_scale, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
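In plain terms, the first two hunks widen update_scales to accept controlnet_scale, ip_adapter_scale, edit_threshold, edit_guidance_scale, init_latents and zs, and add an "inversion" branch that forwards the inverted latents and noise maps to clip_slider.generate. The sketch below isolates that dispatch pattern; generate_fn is a hypothetical stand-in for clip_slider.generate, and because the committed branch references avg_diff_0/avg_diff_1/avg_diff_2nd_0/avg_diff_2nd_1, which are not defined in the hunks shown, the sketch simply forwards the already-built avg_diff tuples instead.

# Minimal sketch of the branch dispatch added in this commit (assumptions:
# generate_fn stands in for clip_slider.generate; this is not the Space's code).
def dispatch_generate(generate_fn, prompt, img2img_type=None, img=None,
                      avg_diff=None, avg_diff_2nd=None,
                      init_latents=None, zs=None, **kwargs):
    if img2img_type == "controlnet canny" and img is not None:
        return generate_fn(prompt, image=img, avg_diff=avg_diff,
                           avg_diff_2nd=avg_diff_2nd, **kwargs)
    elif img2img_type == "ip adapter" and img is not None:
        return generate_fn(prompt, ip_adapter_image=img, avg_diff=avg_diff,
                           avg_diff_2nd=avg_diff_2nd, **kwargs)
    elif img2img_type == "inversion":
        # New branch: reuse the inverted latents and noise maps produced by invert()
        return generate_fn(prompt, ip_adapter_image=img, avg_diff=avg_diff,
                           avg_diff_2nd=avg_diff_2nd,
                           init_latents=init_latents, zs=zs, **kwargs)
    else:
        return generate_fn(prompt, avg_diff=avg_diff,
                           avg_diff_2nd=avg_diff_2nd, **kwargs)

Called as dispatch_generate(clip_slider.generate, prompt, img2img_type="inversion", init_latents=init_latents, zs=zs, ...), this reproduces the branch selection shown in the diff.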
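The third hunk only grows the inputs list on submit_inv.click. In Gradio, the current values of the components listed in inputs are passed to the callback positionally, so the expanded list has to line up with generate's parameter order, including the new img2img_type_inv, image, controlnet_conditioning_scale, ip_adapter_scale, edit_threshold, edit_guidance_scale, init_latents and zs entries. A minimal, self-contained sketch of that wiring pattern, with hypothetical components rather than the Space's actual layout:

import gradio as gr

# Hypothetical callback: receives the component values positionally,
# in the same order as the inputs list below.
def generate(prompt, seed, steps):
    return f"would generate '{prompt}' with seed={int(seed)} and {int(steps)} steps"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    seed = gr.Number(value=0, label="seed")
    steps = gr.Slider(1, 50, value=25, step=1, label="steps")
    out = gr.Textbox(label="result")
    btn = gr.Button("Generate")
    # Order of `inputs` must match the callback's positional parameters.
    btn.click(fn=generate, inputs=[prompt, seed, steps], outputs=[out])

# demo.launch()  # uncomment to serve the UI locally

Values that are not user-facing widgets, such as init_latents and zs here, are typically held in gr.State components so they can be threaded through inputs the same way.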