Spaces:
Sleeping
Sleeping
patrickligardes
committed on
Commit
•
3e4084f
1
Parent(s):
ecf5099
Update app.py
Browse files
app.py
CHANGED
@@ -122,7 +122,7 @@ pipe = TryonPipeline.from_pretrained(
|
|
122 |
pipe.unet_encoder = UNet_Encoder
|
123 |
|
124 |
@spaces.GPU
|
125 |
-
def start_tryon(
|
126 |
device = "cuda"
|
127 |
category = int(category)
|
128 |
if category==0:
|
@@ -137,7 +137,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
|
|
137 |
pipe.unet_encoder.to(device)
|
138 |
|
139 |
garm_img= garm_img.convert("RGB").resize((768,1024))
|
140 |
-
human_img_orig =
|
141 |
|
142 |
if is_checked_crop:
|
143 |
width, height = human_img_orig.size
|
@@ -165,7 +165,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
|
|
165 |
mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
|
166 |
mask = mask.resize((768,1024))
|
167 |
else:
|
168 |
-
mask = pil_to_binary_mask(
|
169 |
# mask = transforms.ToTensor()(mask)
|
170 |
# mask = mask.unsqueeze(0)
|
171 |
mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
|
@@ -288,7 +288,11 @@ with image_blocks as demo:
|
|
288 |
with gr.Row():
|
289 |
category = gr.Textbox(placeholder="0 = upper body, 1 = lower body, 2 = full body", show_label=False, elem_id="prompt")
|
290 |
|
291 |
-
|
|
|
|
|
|
|
|
|
292 |
|
293 |
with gr.Column():
|
294 |
garm_img = gr.Image(label="Garment", sources='upload', type="pil")
|
|
|
122 |
pipe.unet_encoder = UNet_Encoder
|
123 |
|
124 |
@spaces.GPU
|
125 |
+
def start_tryon(imgs,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed, category):
|
126 |
device = "cuda"
|
127 |
category = int(category)
|
128 |
if category==0:
|
|
|
137 |
pipe.unet_encoder.to(device)
|
138 |
|
139 |
garm_img= garm_img.convert("RGB").resize((768,1024))
|
140 |
+
human_img_orig = imgs.convert("RGB")
|
141 |
|
142 |
if is_checked_crop:
|
143 |
width, height = human_img_orig.size
|
|
|
165 |
mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
|
166 |
mask = mask.resize((768,1024))
|
167 |
else:
|
168 |
+
mask = pil_to_binary_mask(imgs.convert("RGB").resize((768, 1024)))
|
169 |
# mask = transforms.ToTensor()(mask)
|
170 |
# mask = mask.unsqueeze(0)
|
171 |
mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
|
|
|
288 |
with gr.Row():
|
289 |
category = gr.Textbox(placeholder="0 = upper body, 1 = lower body, 2 = full body", show_label=False, elem_id="prompt")
|
290 |
|
291 |
+
example = gr.Examples(
|
292 |
+
inputs=imgs,
|
293 |
+
examples_per_page=10,
|
294 |
+
examples=human_ex_list
|
295 |
+
)
|
296 |
|
297 |
with gr.Column():
|
298 |
garm_img = gr.Image(label="Garment", sources='upload', type="pil")
|