Spaces:
Running
on
Zero
Running
on
Zero
parokshsaxena
committed on
Commit
•
537f619
1
Parent(s):
3242b28
making image instead of imageEditor for human img input
Browse files
app.py
CHANGED
@@ -151,10 +151,8 @@ def start_tryon(human_img_dict,garm_img,garment_des, background_img, is_checked,
|
|
151 |
pipe.to(device)
|
152 |
pipe.unet_encoder.to(device)
|
153 |
|
154 |
-
|
155 |
-
|
156 |
-
else:
|
157 |
-
human_img_orig = dict.convert("RGB") # Image
|
158 |
|
159 |
"""
|
160 |
# Derive HEIGHT & WIDTH such that width is not more than 1000. This will cater to both Shein images (4160x6240) of 3:4 AR and model standard images ( 768x1024 ) of 2:3 AR
|
@@ -298,15 +296,15 @@ human_list = os.listdir(os.path.join(example_path,"human"))
|
|
298 |
human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
|
299 |
|
300 |
human_ex_list = []
|
301 |
-
|
302 |
-
|
303 |
for ex_human in human_list_path:
|
304 |
ex_dict= {}
|
305 |
ex_dict['background'] = ex_human
|
306 |
ex_dict['layers'] = None
|
307 |
ex_dict['composite'] = None
|
308 |
human_ex_list.append(ex_dict)
|
309 |
-
|
310 |
##default human
|
311 |
|
312 |
|
@@ -319,8 +317,8 @@ with image_blocks as demo:
|
|
319 |
with gr.Column():
|
320 |
# changing from ImageEditor to Image to allow easy passing of data through API
|
321 |
# instead of passing {"dictionary": <>} ( which is failing ), we can directly pass the image
|
322 |
-
imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
|
323 |
-
|
324 |
with gr.Row():
|
325 |
is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
|
326 |
with gr.Row():
|
|
|
151 |
pipe.to(device)
|
152 |
pipe.unet_encoder.to(device)
|
153 |
|
154 |
+
#human_img_orig = human_img_dict["background"].convert("RGB") # ImageEditor
|
155 |
+
human_img_orig = dict.convert("RGB") # Image
|
|
|
|
|
156 |
|
157 |
"""
|
158 |
# Derive HEIGHT & WIDTH such that width is not more than 1000. This will cater to both Shein images (4160x6240) of 3:4 AR and model standard images ( 768x1024 ) of 2:3 AR
|
|
|
296 |
human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
|
297 |
|
298 |
human_ex_list = []
|
299 |
+
human_ex_list = human_list_path # Image
|
300 |
+
""" if using ImageEditor instead of Image while taking input, use this - ImageEditor
|
301 |
for ex_human in human_list_path:
|
302 |
ex_dict= {}
|
303 |
ex_dict['background'] = ex_human
|
304 |
ex_dict['layers'] = None
|
305 |
ex_dict['composite'] = None
|
306 |
human_ex_list.append(ex_dict)
|
307 |
+
"""
|
308 |
##default human
|
309 |
|
310 |
|
|
|
317 |
with gr.Column():
|
318 |
# changing from ImageEditor to Image to allow easy passing of data through API
|
319 |
# instead of passing {"dictionary": <>} ( which is failing ), we can directly pass the image
|
320 |
+
#imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
|
321 |
+
imgs = gr.Image(sources='upload', type='pil',label='Human. Mask with pen or use auto-masking')
|
322 |
with gr.Row():
|
323 |
is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
|
324 |
with gr.Row():
|