# NOTE(review): lines removed here were Hugging Face Spaces page chrome
# ("Spaces: / Sleeping") captured by the scrape — not part of the program.
import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageEnhance
from torchvision import models, transforms
# Load a COCO-pretrained Faster R-CNN detector and freeze it in inference mode.
# NOTE(review): `pretrained=True` is deprecated in torchvision >= 0.13 in
# favour of `weights=...` — confirm the pinned torchvision version.
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

# Detection models take un-normalized float tensors in [0, 1]; ToTensor alone
# is sufficient preprocessing here.
transform = transforms.Compose([transforms.ToTensor()])
def detect_dress(image):
    """Detect objects in *image* and draw red boxes around confident hits.

    Args:
        image: a PIL.Image (RGB); it is mutated in place by the box drawing.

    Returns:
        (image, dress_boxes): the annotated image and a list of ``[x0, y0,
        x1, y1]`` numpy arrays for detections scoring above the threshold.

    NOTE(review): the COCO-pretrained model has no "dress" class; this keeps
    *every* detection above the score cut-off regardless of its label —
    confirm whether label filtering was intended.
    """
    image_tensor = transform(image).unsqueeze(0)
    with torch.no_grad():  # inference only — no autograd graph needed
        outputs = model(image_tensor)
    boxes = outputs[0]['boxes'].numpy()
    scores = outputs[0]['scores'].numpy()
    threshold = 0.8
    dress_boxes = [box for box, score in zip(boxes, scores) if score > threshold]
    # Bug fix: ImageDraw was referenced without being imported (NameError);
    # the import is now provided at the top of the file.
    draw = ImageDraw.Draw(image)
    for box in dress_boxes:
        draw.rectangle(box.tolist(), outline="red", width=3)
    return image, dress_boxes
def crop_image(image, box):
    """Crop *image* to *box* = (left, upper, right, lower).

    Robustness fix: the detector hands this a float numpy array; coerce each
    coordinate to an int (round-half like Pillow does internally) so any
    sequence of numbers is accepted.

    Returns:
        A new cropped PIL.Image; the input image is left untouched.
    """
    left, upper, right, lower = (int(round(float(v))) for v in box)
    return image.crop((left, upper, right, lower))
def adjust_color(image, factor):
    """Return a copy of *image* with its color saturation scaled by *factor*.

    A factor of 1.0 reproduces the original; 0.0 yields grayscale; values
    above 1.0 boost saturation.
    """
    return ImageEnhance.Color(image).enhance(factor)
def process_image(image, edit_type, factor):
    """Detect dresses in *image*, then apply the selected edit.

    Args:
        image: input PIL.Image.
        edit_type: one of "None", "Crop", "Adjust Color".
        factor: saturation multiplier used by the "Adjust Color" edit.

    Returns:
        (image, message): the edited image and a status string; when nothing
        is detected, the annotated image and a "No dresses detected." note.
    """
    annotated, detections = detect_dress(image)
    if not detections:
        return annotated, "No dresses detected."

    # Dispatch table keeps the edit selection flat; unknown choices
    # (including "None") fall through to the untouched input image.
    handlers = {
        "Crop": lambda: crop_image(image, detections[0]),
        "Adjust Color": lambda: adjust_color(image, factor),
    }
    edited = handlers.get(edit_type, lambda: image)()
    return edited, "Edit applied."
# Bug fix: the `gr.inputs.*` / `gr.outputs.*` namespaces were removed in
# Gradio 3.x/4.x; use the top-level component classes instead.
iface = gr.Interface(
    fn=process_image,
    inputs=[
        gr.Image(type="pil", label="Input image"),
        # value="None" preserves the old Radio behavior of pre-selecting
        # the first choice.
        gr.Radio(choices=["None", "Crop", "Adjust Color"], value="None",
                 label="Edit"),
        gr.Slider(0.5, 2.0, step=0.1, label="Factor"),
    ],
    outputs=[
        gr.Image(type="pil", label="Edited image"),
        gr.Textbox(label="Result"),
    ],
    live=True,  # re-run on every input change instead of a submit button
)
iface.launch()