Commit f65f11f · parokshsaxena committed
Parent: 4191c16
adding option to pass background image for adding background

Files changed:
- app.py (+25 -6)
- src/background_processor.py (+38 -0)
app.py CHANGED
@@ -4,6 +4,7 @@ from PIL import Image
 from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
 from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
 from src.unet_hacked_tryon import UNet2DConditionModel
+from src.background_processor import BackgroundProcessor
 from transformers import (
     CLIPImageProcessor,
     CLIPVisionModelWithProjection,
@@ -131,7 +132,7 @@ POSE_HEIGHT = int(HEIGHT/2) #int(HEIGHT/2)
 CATEGORY = "upper_body" # "lower_body"
 
 @spaces.GPU
-def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
+def start_tryon(dict,garm_img,garment_des, background_img, is_checked,is_checked_crop,denoise_steps,seed):
     device = "cuda"
     # device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
 
@@ -249,9 +250,17 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
     if is_checked_crop:
         out_img = images[0].resize(crop_size)
         human_img_orig.paste(out_img, (int(left), int(top)))
-        return human_img_orig, mask_gray
+        final_image = human_img_orig
+        # return human_img_orig, mask_gray
     else:
-        return images[0], mask_gray
+        final_image = images[0]
+        # return images[0], mask_gray
+
+    # apply background to final image
+    if background_img:
+        logging.info("Adding background")
+        final_image = BackgroundProcessor.add_background(final_image, background_img)
+    return final_image, mask_gray
     # return images[0], mask_gray
 
 garm_list = os.listdir(os.path.join(example_path,"cloth"))
@@ -298,13 +307,23 @@ with image_blocks as demo:
             inputs=garm_img,
             examples_per_page=8,
             examples=garm_list_path)
+
+        with gr.Column():
+            background_img = gr.Image(label="Background", sources='upload', type="pil")
+
+        with gr.Column():
+            with gr.Row():
+                image_out = gr.Image(label="Output", elem_id="output-img", show_share_button=False)
+            with gr.Row():
+                masked_img = gr.Image(label="Masked image output", elem_id="masked-img", show_share_button=False)
+        """
         with gr.Column():
             # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
             masked_img = gr.Image(label="Masked image output", elem_id="masked-img", show_share_button=False)
-        with gr.Column():
+        with gr.Column():
             # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
             image_out = gr.Image(label="Output", elem_id="output-img", show_share_button=False)
-
+        """
 
 
 
@@ -317,7 +336,7 @@
 
 
 
-    try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed], outputs=[image_out,masked_img], api_name='tryon')
+    try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, background_img, is_checked,is_checked_crop, denoise_steps, seed], outputs=[image_out,masked_img], api_name='tryon')
 
 
 
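Note on the app.py changes: with gr.Image(type="pil"), Gradio hands start_tryon a PIL image when a background is uploaded and None otherwise, so the `if background_img:` guard skips compositing when the field is left empty. The new code calls logging.info but this diff adds no `import logging`, so that import presumably already exists elsewhere in app.py. Below is a minimal sketch of exercising the new helper outside the UI; the file paths are hypothetical, and it assumes the repo's preprocess package and human-parsing weights are available locally (importing src.background_processor instantiates Parsing(0) at module load).

    # Hypothetical local check of the new background path (paths are examples).
    from PIL import Image
    from src.background_processor import BackgroundProcessor  # loads Parsing(0) on import

    tryon_result = Image.open("tryon_result.png")   # stands in for images[0]
    new_background = Image.open("background.jpg")   # stands in for the background_img input

    composited = BackgroundProcessor.add_background(tryon_result, new_background)
    composited.save("tryon_with_background.png")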
src/background_processor.py ADDED
@@ -0,0 +1,38 @@
+from PIL import Image
+import numpy as np
+from preprocess.humanparsing.run_parsing import Parsing
+
+parsing_model = Parsing(0)
+
+class BackgroundProcessor:
+    @classmethod
+    def add_background(cls, human_img: Image, background_img: Image):
+
+        human_img = human_img.convert("RGB")
+        width = human_img.width
+        height = human_img.height
+
+        # Create mask image
+        parsed_img, _ = parsing_model(human_img)
+        mask_img = parsed_img.convert("L")
+        mask_img = mask_img.resize((width, height))
+
+        background_img = background_img.convert("RGB")
+        background_img = background_img.resize((width, height))
+
+        # Convert to numpy arrays
+        human_np = np.array(human_img)
+        mask_np = np.array(mask_img)
+        background_np = np.array(background_img)
+
+        # Ensure mask is 3-channel (RGB) for compatibility
+        mask_np = np.stack((mask_np,) * 3, axis=-1)
+
+        # Apply the mask to human_img
+        human_with_background = np.where(mask_np > 0, human_np, background_np)
+
+        # Convert back to PIL Image
+        result_img = Image.fromarray(human_with_background.astype('uint8'))
+
+        # Return or save the result
+        return result_img
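Note on src/background_processor.py: the compositing is a single np.where over a person-segmentation label map; any nonzero label (a person or clothing class) keeps the try-on pixel, and label 0 (background) takes the new background pixel. A self-contained sketch of that selection logic, using toy arrays in place of real images and parser output:

    import numpy as np

    # 2x2 toy images: uniform "person" and "background" colors.
    human_np = np.full((2, 2, 3), 200, dtype=np.uint8)
    background_np = np.full((2, 2, 3), 10, dtype=np.uint8)

    # Toy label map ("L" mode): nonzero = some person/clothing class, 0 = background.
    mask_np = np.array([[7, 0],
                        [0, 5]], dtype=np.uint8)
    mask_np = np.stack((mask_np,) * 3, axis=-1)  # same 3-channel expansion as the diff

    out = np.where(mask_np > 0, human_np, background_np)
    print(out[0, 0], out[0, 1])  # -> [200 200 200] [10 10 10]

One caveat worth noting: mask_img.resize((width, height)) uses Pillow's default (bicubic) resampling, which can blend label values at class boundaries; Image.NEAREST is the usual choice for label maps, though with the `> 0` threshold the effect is limited to the person's outline.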