Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+from email import generator
 from diffusers import StableDiffusionPipeline
 from diffusers import StableDiffusionImg2ImgPipeline
 import gradio as gr
@@ -14,7 +15,8 @@ models = [
     "yuk/fuyuko-waifu-diffusion",
     "AstraliteHeart/pony-diffusion",
     "IfanSnek/JohnDiffusion",
-    "nousr/robo-diffusion"
+    "nousr/robo-diffusion",
+    "DGSpitzer/Cyberpunk-Anime-Diffusion"
 ]
 
 prompt_prefixes = {
@@ -29,6 +31,7 @@ prompt_prefixes = {
     models[8]: "",
     models[9]: "",
     models[10]: "",
+    models[11]: "dgs illustration style ",
 }
 
 current_model = models[0]
@@ -38,13 +41,14 @@ if torch.cuda.is_available():
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
-def inference(model, prompt, img, guidance, steps):
+def inference(model, prompt, img, strength, guidance, steps, seed):
+    generator = torch.manual_seed(seed) if seed != 0 else None
     if img is not None:
-        return img_inference(model, prompt, img, guidance, steps)
+        return img_inference(model, prompt, img, strength, guidance, steps, generator)
     else:
-        return text_inference(model, prompt, guidance, steps)
+        return text_inference(model, prompt, guidance, steps, generator)
 
-def text_inference(model, prompt, guidance, steps):
+def text_inference(model, prompt, guidance, steps, generator):
 
     global current_model
     global pipe
@@ -56,10 +60,16 @@ def text_inference(model, prompt, guidance, steps):
         pipe = pipe.to("cuda")
 
     prompt = prompt_prefixes[current_model] + prompt
-    image = pipe(
+    image = pipe(
+        prompt,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        width=512,
+        height=512,
+        generator=generator).images[0]
     return image
 
-def img_inference(model, prompt, img, guidance, steps):
+def img_inference(model, prompt, img, strength, guidance, steps, generator):
 
     global current_model
     global pipe
@@ -76,10 +86,11 @@ def img_inference(model, prompt, img, guidance, steps):
         prompt,
         init_image=img,
         num_inference_steps=int(steps),
-        strength=
+        strength=strength,
         guidance_scale=guidance,
         width=512,
-        height=512
+        height=512,
+        generator=generator).images[0]
     return image
 
 
@@ -118,7 +129,7 @@ with gr.Blocks(css=css) as demo:
          </div>
          <p>
           Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
-          <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/IfanSnek/JohnDiffusion">John</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a
+          <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/IfanSnek/JohnDiffusion">John</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>
          </p>
        </div>
    """
@@ -126,17 +137,25 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
 
         with gr.Column():
+
            model = gr.Dropdown(label="Model", choices=models, value=models[0])
            prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
-
-
-
+           with gr.Accordion("Image to image (optional)", open=False):
+               image = gr.Image(label="Image", height=256, tool="editor")
+               strength = gr.Slider(label="Strength", minimum=0, maximum=1, step=0.01, value=0.75)
+
+           with gr.Accordion("Advanced options", open=False):
+               guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+               steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2)
+               seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
            run = gr.Button(value="Run")
            gr.Markdown(f"Running on: {device}")
        with gr.Column():
            image_out = gr.Image(height=512)
 
-
+    prompt.submit(inference, inputs=[model, image, strength, prompt, guidance, steps, seed], outputs=image_out)
+    run.click(inference, inputs=[model, image, strength, prompt, guidance, steps, seed], outputs=image_out)
     gr.Examples([
        [models[0], "jason bateman disassembling the demon core", 7.5, 50],
        [models[3], "portrait of dwayne johnson", 7.0, 75],
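A note on the new seed handling: torch.manual_seed(seed) seeds the global CPU RNG and returns that global torch.Generator, which diffusers accepts through the generator= argument; when seed is 0 the code passes None and the pipeline falls back to a random seed. The local name generator also shadows the "from email import generator" line added at the top of the file, so that import looks like a stray auto-import rather than something the code uses. A minimal sketch of the same idea with a dedicated generator object (a hypothetical helper for illustration, not part of the Space's code):

import torch

def make_generator(seed: int):
    # Hypothetical helper: a dedicated, seeded torch.Generator gives reproducible
    # sampling; returning None lets the pipeline choose a random seed instead.
    if seed == 0:
        return None
    return torch.Generator().manual_seed(seed)

generator = make_generator(1234)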
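The hunks above only show the edges of text_inference: the global declarations, the move to CUDA, and the final pipeline call. The lines in between, which presumably reload the pipeline when a different model is selected, fall outside the diff context, so the sketch below is an assumption about that pattern rather than the Space's actual code; it only illustrates how a module-level pipe can be swapped per request with StableDiffusionPipeline.from_pretrained:

import torch
from diffusers import StableDiffusionPipeline

pipe = None
current_model = None

def get_pipe(model_id: str):
    # Hypothetical sketch: rebuild the pipeline only when the selected model changes,
    # then move it to the GPU when one is available.
    global pipe, current_model
    if pipe is None or model_id != current_model:
        current_model = model_id
        pipe = StableDiffusionPipeline.from_pretrained(current_model)
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
    return pipe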
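In the image-to-image branch, strength (0 to 1) controls how much noise is layered onto the uploaded picture before denoising: lower values stay closer to the input image, values near 1 behave almost like plain text-to-image. Below is a small self-contained usage sketch of StableDiffusionImg2ImgPipeline with that parameter, assuming a recent diffusers release (which names the argument image=; the init_image= spelling in this file belongs to older releases). The model id is one of those linked in the description above; the prompt and file names are placeholders:

import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Arcane-Diffusion")
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

init = Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder input image
result = pipe(
    "arcane style portrait",    # placeholder prompt; the Space prepends a per-model prefix
    image=init,                 # older diffusers releases called this init_image
    strength=0.75,              # lower values stay closer to the input image
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=torch.Generator().manual_seed(1234),
).images[0]
result.save("output.png")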
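Gradio passes the inputs= components to the callback positionally, so the list order has to mirror the callback's parameters. The handlers added here list [model, image, strength, prompt, guidance, steps, seed] while inference is declared as (model, prompt, img, strength, guidance, steps, seed), which would route the image into prompt, the strength value into img, and the prompt text into strength. A small self-contained sketch of order-consistent wiring (stub callback and placeholder choices, for illustration only):

import gradio as gr

def inference(model, prompt, img, strength, guidance, steps, seed):
    # Stub standing in for the real function above; it just echoes the input image.
    return img

with gr.Blocks() as demo:
    model = gr.Dropdown(choices=["model-a", "model-b"], value="model-a", label="Model")
    prompt = gr.Textbox(label="Prompt")
    image = gr.Image(label="Image")
    strength = gr.Slider(0, 1, value=0.75, label="Strength")
    guidance = gr.Slider(0, 15, value=7.5, label="Guidance scale")
    steps = gr.Slider(2, 100, value=50, label="Steps")
    seed = gr.Slider(0, 2147483647, value=0, step=1, label="Seed (0 = random)")
    image_out = gr.Image(label="Output")
    run = gr.Button("Run")

    # Inputs listed in the same order as the inference(...) parameters.
    inputs = [model, prompt, image, strength, guidance, steps, seed]
    prompt.submit(inference, inputs=inputs, outputs=image_out)
    run.click(inference, inputs=inputs, outputs=image_out)

demo.launch()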