import spaces
import torch
from PIL import Image
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image
import gradio as gr
import warnings

warnings.filterwarnings("ignore")


def resize_for_condition_image(input_image: Image.Image, resolution: int):
    """Scale the image so its shorter side is about `resolution` px, rounding
    both dimensions to multiples of 64 as the Stable Diffusion UNet expects."""
    input_image = input_image.convert("RGB")
    W, H = input_image.size
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(round(H / 64.0)) * 64
    W = int(round(W / 64.0)) * 64
    img = input_image.resize((W, H), resample=Image.LANCZOS)
    return img
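

# Worked example of the rounding above: a 512x768 (WxH) input with resolution=1024
# gives k=2.0, so the image is resized to 1024x1536, both dimensions multiples of 64.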

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Tile ControlNet for SD 1.5; it conditions the upscaling pass on the
# low-resolution input itself.
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1e_sd15_tile",
                                             torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
                                         custom_pipeline="stable_diffusion_controlnet_img2img",
                                         controlnet=controlnet,
                                         torch_dtype=torch.float16).to(device)
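
# The community "stable_diffusion_controlnet_img2img" pipeline runs img2img while the
# tile ControlNet receives the same low-res image as conditioning, so the model adds
# detail without drifting far from the input. Optional, and only an assumption that it
# is needed if VRAM is tight on your hardware:
# pipe.enable_attention_slicing()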


# On a Hugging Face ZeroGPU Space, @spaces.GPU requests a GPU for the duration of each call.
@spaces.GPU(enable_queue=True)
def super_esr(source_image, prompt, negative_prompt, strength, seed, num_inference_steps):
    condition_image = resize_for_condition_image(source_image, 1024)
    # Gradio sliders return floats, so cast before use.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    image = pipe(prompt=prompt,
                 negative_prompt=negative_prompt,
                 image=condition_image,
                 # The tile ControlNet is conditioned on the same resized input.
                 controlnet_conditioning_image=condition_image,
                 width=condition_image.size[0],
                 height=condition_image.size[1],
                 strength=strength,
                 generator=generator,
                 num_inference_steps=int(num_inference_steps),
                 ).images[0]

    return image
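
# Local smoke test (assumption: a CUDA GPU and a hypothetical file "input.png";
# on Spaces the function is only called through the Gradio UI):
#
#   result = super_esr(Image.open("input.png"), "a sharp, detailed photo",
#                      "blur, lowres", 1.0, 1, 20)
#   result.save("output.png")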


inputs = [
    gr.Image(type="pil", label="Source Image"),
    gr.Textbox(lines=2, label="Prompt"),
    gr.Textbox(lines=2, value="blur, lowres, bad anatomy, bad hands, cropped, worst quality",
               label="Negative Prompt"),
    gr.Slider(minimum=0, maximum=1, value=1.0, label="Strength"),
    gr.Slider(minimum=-100000, maximum=100000, value=1, step=1, label="Seed"),
    gr.Slider(minimum=1, maximum=100, value=20, step=1, label="Num Inference Steps"),
]
outputs = [
    gr.Image(type="pil", label="Output Image"),
]
title = "Super ESR"
description = ("Super ESR is a diffusion-based super-resolution demo: it uses Stable Diffusion 1.5 "
               "with a tile ControlNet to generate a high-resolution image from a low-resolution input.")


demo = gr.Interface(fn=super_esr,
                    inputs=inputs,
                    outputs=outputs,
                    title=title,
                    description=description)

demo.queue(max_size=20).launch()
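
# queue(max_size=20) caps the number of pending requests. When run locally (assumption:
# the file is saved as app.py and started with `python app.py`), launch() serves the UI
# at http://127.0.0.1:7860 by default; on a Hugging Face Space the app starts automatically.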