Ming Li committed
Commit 8fad46e
Parent(s): 70b97dd

fix bugs
Files changed:
- app.py +8 -8
- app_canny.py +1 -1
- app_depth.py +3 -3
- app_ip2p.py +1 -1
- app_lineart.py +3 -3
- app_mlsd.py +1 -1
- app_normal.py +1 -1
- app_openpose.py +1 -1
- app_scribble.py +1 -1
- app_scribble_interactive.py +1 -1
- app_segmentation.py +4 -4
- app_shuffle.py +1 -1
- app_softedge.py +3 -3
- images/depth_demo.png +0 -0
- images/hed_demo.jpeg +0 -0
- images/lineart_demo.jpg +0 -0
- images/seg_demo.png +0 -0
- model.py +40 -5
app.py
CHANGED
@@ -36,16 +36,16 @@ with gr.Blocks(css="style.css") as demo:
     )
 
     with gr.Tabs():
-        with gr.TabItem("Canny"):
-            create_demo_canny(model.process_canny)
-        with gr.TabItem("SoftEdge"):
-            create_demo_softedge(model.process_softedge)
-        with gr.TabItem("Segmentation"):
-            create_demo_segmentation(model.process_segmentation)
-        with gr.TabItem("Depth"):
-            create_demo_depth(model.process_depth)
         with gr.TabItem("Lineart"):
             create_demo_lineart(model.process_lineart)
+        with gr.TabItem("Depth"):
+            create_demo_depth(model.process_depth)
+        with gr.TabItem("Segmentation"):
+            create_demo_segmentation(model.process_segmentation)
+        with gr.TabItem("SoftEdge"):
+            create_demo_softedge(model.process_softedge)
+        with gr.TabItem("Canny"):
+            create_demo_canny(model.process_canny)
 
     with gr.Accordion(label="Base model", open=False):
         with gr.Row():
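For reference, this hunk only reorders the tabs so the Space opens on Lineart; each tab still builds its sub-demo and binds it to one Model.process_* method. A minimal, self-contained sketch of that wiring pattern (the create_demo helper and process functions below are simplified stand-ins, not the repo's real app_*.py code):

import gradio as gr


def create_demo(process):
    # Stand-in for the create_demo_* helpers: build a per-task UI in place
    # and wire its Run button to the given process callable.
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(fn=process, inputs=prompt, outputs=result)


def make_process(task_name):
    # Placeholder for Model.process_canny, Model.process_depth, etc.
    def process(prompt):
        return f"[{task_name}] would generate images for: {prompt!r}"
    return process


with gr.Blocks() as demo:
    with gr.Tabs():
        # Same order as the "+" side of the diff: Lineart first, Canny last.
        for task in ["Lineart", "Depth", "Segmentation", "SoftEdge", "Canny"]:
            with gr.TabItem(task):
                create_demo(make_process(task))

if __name__ == "__main__":
    demo.launch()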
app_canny.py
CHANGED
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_depth.py
CHANGED
@@ -16,8 +16,8 @@ def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image()
-                prompt = gr.Textbox(label="Prompt")
+                image = gr.Image(value='images/depth_demo.png')
+                prompt = gr.Textbox(label="Prompt", value='heart, mountains, and nature image')
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_ip2p.py
CHANGED
@@ -34,7 +34,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_lineart.py
CHANGED
@@ -16,8 +16,8 @@ def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image()
-                prompt = gr.Textbox(label="Prompt")
+                image = gr.Image(value='images/lineart_demo.jpg')
+                prompt = gr.Textbox(label="Prompt", value='Picture Of Looking Through A View Finder')
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -50,7 +50,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_mlsd.py
CHANGED
@@ -43,7 +43,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_normal.py
CHANGED
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_openpose.py
CHANGED
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_scribble.py
CHANGED
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_scribble_interactive.py
CHANGED
@@ -54,7 +54,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_segmentation.py
CHANGED
@@ -16,12 +16,12 @@ def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image()
-                prompt = gr.Textbox(label="Prompt")
+                image = gr.Image(value='images/seg_demo.png')
+                prompt = gr.Textbox(label="Prompt", value='A large building with a pointed roof and several chimneys.')
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
-                        label="Preprocessor", choices=["UPerNet", "None"], type="value", value="
+                        label="Preprocessor", choices=["UPerNet", "None"], type="value", value="None"
                     )
                     num_samples = gr.Slider(
                         label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
@@ -40,7 +40,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
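One detail in the segmentation hunk worth calling out: a gr.Radio starts on its `value`, and that value has to be one of its `choices`; the tab now opens with the "None" option selected (the string label for "skip preprocessing", not Python's None). A tiny sketch of that widget in isolation, assuming only standard Gradio behavior:

import gradio as gr

with gr.Blocks() as demo:
    # The initial `value` must match one of `choices`; "None" is the choice
    # label for running without a preprocessor, not the Python None object.
    preprocessor_name = gr.Radio(
        label="Preprocessor", choices=["UPerNet", "None"], type="value", value="None"
    )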
app_shuffle.py
CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
app_softedge.py
CHANGED
@@ -16,8 +16,8 @@ def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image()
-                prompt = gr.Textbox(label="Prompt")
+                image = gr.Image(value='images/hed_demo.jpeg')
+                prompt = gr.Textbox(label="Prompt", value='Language trip to Laon')
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -49,7 +49,7 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt", value="
+                    a_prompt = gr.Textbox(label="Additional prompt", value="high-quality, extremely detailed, 4K")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
images/depth_demo.png
ADDED
images/hed_demo.jpeg
ADDED
images/lineart_demo.jpg
ADDED
images/seg_demo.png
ADDED
model.py
CHANGED
@@ -159,7 +159,12 @@ class Model:
             guidance_scale=guidance_scale,
             seed=seed,
         )
-
+        conditions_of_generated_imgs = [
+            self.preprocessor(
+                image=x, low_threshold=low_threshold, high_threshold=high_threshold, detect_resolution=image_resolution
+            ) for x in results
+        ]
+        return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
     def process_mlsd(
@@ -351,7 +356,15 @@ class Model:
             guidance_scale=guidance_scale,
             seed=seed,
         )
-
+        conditions_of_generated_imgs = [
+            self.preprocessor(
+                image=x,
+                image_resolution=image_resolution,
+                detect_resolution=preprocess_resolution,
+                scribble=safe,
+            ) for x in results
+        ]
+        return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
     def process_openpose(
@@ -442,7 +455,15 @@ class Model:
             guidance_scale=guidance_scale,
             seed=seed,
         )
-
+        self.preprocessor.load('UPerNet')
+        conditions_of_generated_imgs = [
+            self.preprocessor(
+                image=np.array(x),
+                image_resolution=image_resolution,
+                detect_resolution=preprocess_resolution,
+            ) for x in results
+        ]
+        return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
     def process_depth(
@@ -487,7 +508,14 @@ class Model:
             guidance_scale=guidance_scale,
             seed=seed,
         )
-
+        conditions_of_generated_imgs = [
+            self.preprocessor(
+                image=x,
+                image_resolution=image_resolution,
+                detect_resolution=preprocess_resolution,
+            ) for x in results
+        ]
+        return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
     def process_normal(
@@ -590,7 +618,14 @@ class Model:
             guidance_scale=guidance_scale,
             seed=seed,
        )
-
+        conditions_of_generated_imgs = [
+            self.preprocessor(
+                image=x,
+                image_resolution=image_resolution,
+                detect_resolution=preprocess_resolution,
+            ) for x in results
+        ]
+        return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
     def process_shuffle(
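All five model.py hunks add the same post-processing step: after the diffusion pipeline returns its images, the preprocessor is run again on each generated image, and the method returns one flat list holding the control image (repeated once per sample), the generated images, and the conditions re-extracted from them, so the gallery can show all three side by side. A minimal sketch of that return-value pattern, with a placeholder preprocessor standing in for the repo's self.preprocessor:

from typing import Callable, List

from PIL import Image


def assemble_gallery(
    control_image: Image.Image,
    results: List[Image.Image],
    preprocessor: Callable[..., Image.Image],
    **preprocessor_kwargs,
) -> List[Image.Image]:
    # Mirrors the pattern added above: re-run the preprocessor on every
    # generated image and return control-image copies + outputs + conditions.
    conditions_of_generated_imgs = [
        preprocessor(image=x, **preprocessor_kwargs) for x in results
    ]
    return [control_image] * len(results) + results + conditions_of_generated_imgs


if __name__ == "__main__":
    # Hypothetical usage with a trivial "preprocessor" (grayscale stand-in).
    def fake_preprocessor(image, **kwargs):
        return image.convert("L").convert("RGB")

    control = Image.new("RGB", (64, 64), "white")
    outputs = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue")]
    gallery = assemble_gallery(control, outputs, fake_preprocessor)
    assert len(gallery) == 3 * len(outputs)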