Spaces:
Running
on
Zero
Running
on
Zero
parokshsaxena
committed on
Commit
•
5b49f28
1
Parent(s):
1f56321
reducing size to 1/4
Browse files
app.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import gradio as gr
|
2 |
from PIL import Image
|
3 |
from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
|
@@ -25,7 +26,6 @@ from preprocess.openpose.run_openpose import OpenPose
|
|
25 |
from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
|
26 |
from torchvision.transforms.functional import to_pil_image
|
27 |
|
28 |
-
|
29 |
def pil_to_binary_mask(pil_image, threshold=0):
|
30 |
np_image = np.array(pil_image)
|
31 |
grayscale_image = Image.fromarray(np_image).convert("L")
|
@@ -121,10 +121,10 @@ pipe = TryonPipeline.from_pretrained(
|
|
121 |
)
|
122 |
pipe.unet_encoder = UNet_Encoder
|
123 |
|
124 |
-
WIDTH = int(4160/
|
125 |
-
HEIGHT = int(6240/
|
126 |
-
POSE_WIDTH = int(WIDTH/
|
127 |
-
POSE_HEIGHT = int(HEIGHT/
|
128 |
|
129 |
@spaces.GPU
|
130 |
def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
|
@@ -158,6 +158,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
|
|
158 |
model_parse, _ = parsing_model(human_img.resize((POSE_WIDTH, POSE_HEIGHT)))
|
159 |
mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
|
160 |
mask = mask.resize((WIDTH, HEIGHT))
|
|
|
161 |
else:
|
162 |
mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((WIDTH, HEIGHT)))
|
163 |
# mask = transforms.ToTensor()(mask)
|
|
|
1 |
+
import logging
|
2 |
import gradio as gr
|
3 |
from PIL import Image
|
4 |
from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
|
|
|
26 |
from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
|
27 |
from torchvision.transforms.functional import to_pil_image
|
28 |
|
|
|
29 |
def pil_to_binary_mask(pil_image, threshold=0):
|
30 |
np_image = np.array(pil_image)
|
31 |
grayscale_image = Image.fromarray(np_image).convert("L")
|
|
|
121 |
)
|
122 |
pipe.unet_encoder = UNet_Encoder
|
123 |
|
124 |
+
WIDTH = int(4160/4) # 768
|
125 |
+
HEIGHT = int(6240/4) # 1024
|
126 |
+
POSE_WIDTH = int(WIDTH/2) # int(WIDTH/2)
|
127 |
+
POSE_HEIGHT = int(HEIGHT/2) #int(HEIGHT/2)
|
128 |
|
129 |
@spaces.GPU
|
130 |
def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
|
|
|
158 |
model_parse, _ = parsing_model(human_img.resize((POSE_WIDTH, POSE_HEIGHT)))
|
159 |
mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
|
160 |
mask = mask.resize((WIDTH, HEIGHT))
|
161 |
+
logging.info("Mask location on model identified")
|
162 |
else:
|
163 |
mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((WIDTH, HEIGHT)))
|
164 |
# mask = transforms.ToTensor()(mask)
|