patrickligardes committed on
Commit
44051ca
β€’
1 Parent(s): 3e4084f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -122,7 +122,7 @@ pipe = TryonPipeline.from_pretrained(
122
  pipe.unet_encoder = UNet_Encoder
123
 
124
  @spaces.GPU
125
- def start_tryon(imgs,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed, category):
126
  device = "cuda"
127
  category = int(category)
128
  if category==0:
@@ -137,7 +137,7 @@ def start_tryon(imgs,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
137
  pipe.unet_encoder.to(device)
138
 
139
  garm_img= garm_img.convert("RGB").resize((768,1024))
140
- human_img_orig = imgs.convert("RGB")
141
 
142
  if is_checked_crop:
143
  width, height = human_img_orig.size
@@ -165,7 +165,7 @@ def start_tryon(imgs,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
165
  mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
166
  mask = mask.resize((768,1024))
167
  else:
168
- mask = pil_to_binary_mask(imgs.convert("RGB").resize((768, 1024)))
169
  # mask = transforms.ToTensor()(mask)
170
  # mask = mask.unsqueeze(0)
171
  mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
@@ -276,11 +276,11 @@ for ex_human in human_list_path:
276
 
277
  image_blocks = gr.Blocks().queue()
278
  with image_blocks as demo:
279
- gr.Markdown("## VirtualFit")
280
- gr.Markdown("VirtualFIT Demo")
281
  with gr.Row():
282
  with gr.Column():
283
- imgs = gr.Image(label="Person Image", sources='upload', type="pil")
284
  with gr.Row():
285
  is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
286
  with gr.Row():
@@ -293,6 +293,7 @@ with image_blocks as demo:
293
  examples_per_page=10,
294
  examples=human_ex_list
295
  )
 
296
 
297
  with gr.Column():
298
  garm_img = gr.Image(label="Garment", sources='upload', type="pil")
 
122
  pipe.unet_encoder = UNet_Encoder
123
 
124
  @spaces.GPU
125
+ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed, category):
126
  device = "cuda"
127
  category = int(category)
128
  if category==0:
 
137
  pipe.unet_encoder.to(device)
138
 
139
  garm_img= garm_img.convert("RGB").resize((768,1024))
140
+ human_img_orig = dict["background"].convert("RGB")
141
 
142
  if is_checked_crop:
143
  width, height = human_img_orig.size
 
165
  mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
166
  mask = mask.resize((768,1024))
167
  else:
168
+ mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
169
  # mask = transforms.ToTensor()(mask)
170
  # mask = mask.unsqueeze(0)
171
  mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
 
276
 
277
  image_blocks = gr.Blocks().queue()
278
  with image_blocks as demo:
279
+ gr.Markdown("## DressFit")
280
+ gr.Markdown("DressFit Demo")
281
  with gr.Row():
282
  with gr.Column():
283
+ imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
284
  with gr.Row():
285
  is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
286
  with gr.Row():
 
293
  examples_per_page=10,
294
  examples=human_ex_list
295
  )
296
+
297
 
298
  with gr.Column():
299
  garm_img = gr.Image(label="Garment", sources='upload', type="pil")