yyk19 committed
Commit: 896a210
Parent: 5b5da1b

update the interface design.

Files changed (2):
  1. app.py +6 -4
  2. app_old.py +0 -176
app.py CHANGED
@@ -97,6 +97,8 @@ def load_ckpt(model_ckpt = "LAION-Glyph-10M-Epoch-5"):
     torch.cuda.empty_cache()
     time.sleep(2)
     print("empty the cuda cache")
+    return output_str, None
+
 
 cfg = OmegaConf.load("config.yaml")
 model = load_model_from_config(cfg, "laion10M_epoch_6_model_wo_ema.ckpt", verbose=True)
@@ -147,13 +149,13 @@ with block:
         with gr.Column():
             shared_prompt = gr.Textbox(label="Shared Prompt")
             with gr.Row():
-                show_render_button = gr.Button(value="Only Rendered")
-                run_button = gr.Button(value="Run")
+                show_render_button = gr.Button(value="Render Glyph Image")
+                run_button = gr.Button(value="Run Generation")
             with gr.Accordion("Model Options", open=False):
                 with gr.Row():
                     # model_ckpt = gr.inputs.Dropdown(["LAION-Glyph-10M", "Textcaps5K-10"], label="Checkpoint", default = "LAION-Glyph-10M")
                     model_ckpt = gr.inputs.Dropdown(["LAION-Glyph-10M-Epoch-6", "LAION-Glyph-10M-Epoch-5", "LAION-Glyph-1M"], label="Checkpoint", default = "LAION-Glyph-10M-Epoch-6")
-                    load_button = gr.Button(value = "Load Checkpoint")
+                    # load_button = gr.Button(value = "Load Checkpoint")
 
             with gr.Accordion("Shared Advanced Options", open=False):
                 with gr.Row():
@@ -207,7 +209,7 @@ with block:
                                      shared_eta, shared_a_prompt, shared_n_prompt],
                      outputs=[message, result_gallery])
 
-    load_button.click(fn = load_ckpt,
+    model_ckpt.change(load_ckpt,
                       inputs = [model_ckpt],
                       outputs = [message, result_gallery]
                       )
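
In short, the commit drops the explicit "Load Checkpoint" button and instead reloads weights whenever the checkpoint dropdown changes; `load_ckpt` now returns a status string plus `None` so the message box is updated and the gallery is cleared while the new checkpoint loads. The sketch below shows that event-wiring pattern in isolation; `load_demo_ckpt` and its simulated delay are illustrative stand-ins, not the app's actual loading code.

```python
# Minimal sketch of the dropdown-driven checkpoint reload pattern (Gradio 3.x style).
import time
import gradio as gr

def load_demo_ckpt(model_ckpt):
    # The real load_ckpt frees CUDA memory and loads the selected weights;
    # here we only simulate the delay.
    time.sleep(1)
    output_str = f"Loaded checkpoint: {model_ckpt}"
    # Return a status message for the Textbox and None to reset the Gallery.
    return output_str, None

with gr.Blocks() as demo:
    model_ckpt = gr.Dropdown(
        ["LAION-Glyph-10M-Epoch-6", "LAION-Glyph-10M-Epoch-5", "LAION-Glyph-1M"],
        value="LAION-Glyph-10M-Epoch-6",
        label="Checkpoint",
    )
    message = gr.Textbox(label="Message")
    result_gallery = gr.Gallery(label="Output")

    # Changing the dropdown triggers the reload directly; no separate button is needed.
    model_ckpt.change(load_demo_ckpt, inputs=[model_ckpt], outputs=[message, result_gallery])

demo.launch()
```

Wiring the reload to `change` keeps the dropdown and the loaded model in sync, at the cost of reloading immediately on every selection.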
app_old.py DELETED
@@ -1,176 +0,0 @@
-from cldm.ddim_hacked import DDIMSampler
-import math
-from omegaconf import OmegaConf
-from scripts.rendertext_tool import Render_Text, load_model_from_config
-import gradio as gr
-import os
-def process_multi_wrapper(rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
-                          shared_prompt,
-                          width_0, width_1, width_2, width_3,
-                          ratio_0, ratio_1, ratio_2, ratio_3,
-                          top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
-                          top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
-                          yaw_0, yaw_1, yaw_2, yaw_3,
-                          num_rows_0, num_rows_1, num_rows_2, num_rows_3,
-                          shared_num_samples, shared_image_resolution,
-                          shared_ddim_steps, shared_guess_mode,
-                          shared_strength, shared_scale, shared_seed,
-                          shared_eta, shared_a_prompt, shared_n_prompt):
-
-    rendered_txt_values = [rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3]
-    width_values = [width_0, width_1, width_2, width_3]
-    ratio_values = [ratio_0, ratio_1, ratio_2, ratio_3]
-    top_left_x_values = [top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3]
-    top_left_y_values = [top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3]
-    yaw_values = [yaw_0, yaw_1, yaw_2, yaw_3]
-    num_rows_values = [num_rows_0, num_rows_1, num_rows_2, num_rows_3]
-
-    return render_tool.process_multi(rendered_txt_values, shared_prompt,
-                                     width_values, ratio_values,
-                                     top_left_x_values, top_left_y_values,
-                                     yaw_values, num_rows_values,
-                                     shared_num_samples, shared_image_resolution,
-                                     shared_ddim_steps, shared_guess_mode,
-                                     shared_strength, shared_scale, shared_seed,
-                                     shared_eta, shared_a_prompt, shared_n_prompt
-                                     )
-
-def process_multi_wrapper_only_show_rendered(rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
-                                             shared_prompt,
-                                             width_0, width_1, width_2, width_3,
-                                             ratio_0, ratio_1, ratio_2, ratio_3,
-                                             top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
-                                             top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
-                                             yaw_0, yaw_1, yaw_2, yaw_3,
-                                             num_rows_0, num_rows_1, num_rows_2, num_rows_3,
-                                             shared_num_samples, shared_image_resolution,
-                                             shared_ddim_steps, shared_guess_mode,
-                                             shared_strength, shared_scale, shared_seed,
-                                             shared_eta, shared_a_prompt, shared_n_prompt):
-
-    rendered_txt_values = [rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3]
-    width_values = [width_0, width_1, width_2, width_3]
-    ratio_values = [ratio_0, ratio_1, ratio_2, ratio_3]
-    top_left_x_values = [top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3]
-    top_left_y_values = [top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3]
-    yaw_values = [yaw_0, yaw_1, yaw_2, yaw_3]
-    num_rows_values = [num_rows_0, num_rows_1, num_rows_2, num_rows_3]
-
-    return render_tool.process_multi(rendered_txt_values, shared_prompt,
-                                     width_values, ratio_values,
-                                     top_left_x_values, top_left_y_values,
-                                     yaw_values, num_rows_values,
-                                     shared_num_samples, shared_image_resolution,
-                                     shared_ddim_steps, shared_guess_mode,
-                                     shared_strength, shared_scale, shared_seed,
-                                     shared_eta, shared_a_prompt, shared_n_prompt,
-                                     only_show_rendered_image=True)
-
-
-cfg = OmegaConf.load("config.yaml")
-model = load_model_from_config(cfg, "model_wo_ema.ckpt", verbose=True)
-# model = load_model_from_config(cfg, "model_states.pt", verbose=True)
-# model = load_model_from_config(cfg, "model.ckpt", verbose=True)
-
-ddim_sampler = DDIMSampler(model)
-render_tool = Render_Text(model)
-
-
-# description = """
-# # <center>Expedit-SAM (Expedite Segment Anything Model without any training)</center>
-# Github link: [Link](https://github.com/Expedit-LargeScale-Vision-Transformer/Expedit-SAM)
-# You can select the speed mode you want to use from the "Speed Mode" dropdown menu and click "Run" to segment the image you uploaded to the "Input Image" box.
-# Points per side is a hyper-parameter that controls the number of points used to generate the segmentation masks. The higher the number, the more accurate the segmentation masks will be, but the slower the inference speed will be. The default value is 12.
-# """
-
-description = """
-## Control Stable Diffusion with Glyph Images
-"""
-
-SPACE_ID = os.getenv('SPACE_ID')
-if SPACE_ID is not None:
-    # description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. < a href=" ">< img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></ a></p >'
-    description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
-
-block = gr.Blocks().queue()
-
-with block:
-    with gr.Row():
-        gr.Markdown(description)
-    only_show_rendered_image = gr.Number(value=1, visible=False)
-
-    with gr.Column():
-
-        with gr.Row():
-            for i in range(4):
-                with gr.Column():
-                    exec(f"""rendered_txt_{i} = gr.Textbox(label=f"Render Text {i+1}")""")
-
-                    with gr.Accordion(f"Advanced options {i+1}", open=False):
-                        exec(f"""width_{i} = gr.Slider(label="Bbox Width", minimum=0., maximum=1, value=0.3, step=0.01) """)
-                        exec(f"""ratio_{i} = gr.Slider(label="Bbox_width_height_ratio", minimum=0., maximum=5, value=0., step=0.02, visible=False) """)
-                        exec(f"""top_left_x_{i} = gr.Slider(label="Bbox Top Left x", minimum=0., maximum=1, value={0.35 - 0.25 * math.cos(math.pi * i)}, step=0.01) """)
-                        exec(f"""top_left_y_{i} = gr.Slider(label="Bbox Top Left y", minimum=0., maximum=1, value={0.1 if i < 2 else 0.6}, step=0.01) """)
-                        exec(f"""yaw_{i} = gr.Slider(label="Bbox Yaw", minimum=-180, maximum=180, value=0, step=5) """)
-                        # exec(f"""num_rows_{i} = gr.Slider(label="num_rows", minimum=1, maximum=4, value=1, step=1, visible=False) """)
-                        exec(f"""num_rows_{i} = gr.Slider(label="num_rows", minimum=1, maximum=4, value=1, step=1) """)
-
-        with gr.Row():
-            with gr.Column():
-                shared_prompt = gr.Textbox(label="Shared Prompt")
-                with gr.Row():
-                    run_button = gr.Button(value="Run")
-                    show_render_button = gr.Button(value="Only Rendered")
-
-                with gr.Accordion("Shared Advanced options", open=False):
-                    with gr.Row():
-                        shared_num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
-                        shared_image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64, visible=False)
-                        shared_strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01, visible=False)
-                        shared_guess_mode = gr.Checkbox(label='Guess Mode', value=False, visible=False)
-                        shared_seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
-                    with gr.Row():
-                        shared_scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
-                        shared_ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
-                        shared_eta = gr.Number(label="eta (DDIM)", value=0.0, visible=False)
-                    with gr.Row():
-                        shared_a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
-                        shared_n_prompt = gr.Textbox(label="Negative Prompt",
-                                                     value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
-
-        with gr.Row():
-            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-
-    run_button.click(fn=process_multi_wrapper,
-                     inputs=[rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
-                             shared_prompt,
-                             width_0, width_1, width_2, width_3,
-                             ratio_0, ratio_1, ratio_2, ratio_3,
-                             top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
-                             top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
-                             yaw_0, yaw_1, yaw_2, yaw_3,
-                             num_rows_0, num_rows_1, num_rows_2, num_rows_3,
-                             shared_num_samples, shared_image_resolution,
-                             shared_ddim_steps, shared_guess_mode,
-                             shared_strength, shared_scale, shared_seed,
-                             shared_eta, shared_a_prompt, shared_n_prompt],
-                     outputs=[result_gallery])
-
-    show_render_button.click(fn=process_multi_wrapper_only_show_rendered,
-                             inputs=[rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
-                                     shared_prompt,
-                                     width_0, width_1, width_2, width_3,
-                                     ratio_0, ratio_1, ratio_2, ratio_3,
-                                     top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
-                                     top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
-                                     yaw_0, yaw_1, yaw_2, yaw_3,
-                                     num_rows_0, num_rows_1, num_rows_2, num_rows_3,
-                                     shared_num_samples, shared_image_resolution,
-                                     shared_ddim_steps, shared_guess_mode,
-                                     shared_strength, shared_scale, shared_seed,
-                                     shared_eta, shared_a_prompt, shared_n_prompt],
-                             outputs=[result_gallery])
-
-
-
-block.launch()