#!/usr/bin/env python
import os
import random
import uuid
import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    StableDiffusionXLInpaintPipeline,
    DPMSolverMultistepScheduler,
)
DESCRIPTION = """
# [Visionix Playground](https://huggingface.co/spaces/ehristoforu/Visionix-Playground)
"""
# Warn users when no GPU is available; this demo is intended to run on GPU.
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
MAX_SEED = np.iinfo(np.int32).max
USE_TORCH_COMPILE = 0
ENABLE_CPU_OFFLOAD = 0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
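# Base Visionix-alpha SDXL text-to-image pipeline.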
pipe = StableDiffusionXLPipeline.from_pretrained(
    "ehristoforu/Visionix-alpha",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(device)
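# Inpainting variant, loaded from a single safetensors checkpoint on the Hub.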
pipe_inpaint = StableDiffusionXLInpaintPipeline.from_single_file(
    "https://huggingface.co/ehristoforu/Visionix-alpha-inpainting/blob/main/Visionix-alpha-inpainting.safetensors",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_inpaint.scheduler = DPMSolverMultistepScheduler.from_config(pipe_inpaint.scheduler.config)
pipe_inpaint.to(device)
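# Show the inpaint-only controls when the "Alpha inpaint" model is selected.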
def get_model(model):
    if model == "Alpha inpaint":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
    else:
        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)

def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
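# Generation entry point: runs text-to-image or inpainting depending on the selected model.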
@spaces.GPU(duration=50, enable_queue=True)
def generate(
    model,
    inpaint_image,
    mask_image,
    blur_factor,
    strength,
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 5.5,
    randomize_seed: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    pipe.to(device)
    seed = int(randomize_seed_fn(seed, randomize_seed))
    if not use_negative_prompt:
        negative_prompt = ""  # type: ignore
    images = None
    if model == "Alpha":
        images = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=25,
            num_images_per_prompt=1,
            output_type="pil",
        ).images
    elif model == "Alpha inpaint":
        blurred_mask = pipe_inpaint.mask_processor.blur(mask_image, blur_factor=blur_factor)
        images = pipe_inpaint(
            prompt=prompt,
            image=inpaint_image,
            mask_image=blurred_mask,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=25,
            strength=strength,
            num_images_per_prompt=1,
            output_type="pil",
        ).images
    image_paths = [save_image(img) for img in images]
    print(image_paths)
    return image_paths, seed
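# Example prompts shown in the UI.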
examples = [
    "neon holography crystal cat",
    "a cat eating a piece of cheese",
    "an astronaut riding a horse in space",
    "a cartoon of a boy playing with a tiger",
    "a cute robot artist painting on an easel, concept art",
    "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone",
]
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
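# Gradio UI layout.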
with gr.Blocks(title="Visionix Playground", css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=False,
    )
    with gr.Row():
        model = gr.Radio(
            label="Model",
            choices=["Alpha", "Alpha inpaint"],
            value="Alpha",
            interactive=True,
        )
    md_mask = gr.Markdown("""
⚠️ To generate an inpaint mask, go [here](https://huggingface.co/spaces/ehristoforu/inpaint-mask-maker).
""", visible=False)
    inpaint_image = gr.Image(label="Inpaint Image", interactive=True, scale=5, visible=False, type="pil")
    mask_image = gr.Image(label="Mask Image", interactive=True, scale=5, visible=False, type="pil")
    blur_factor = gr.Slider(label="Mask Blur Factor", minimum=0, maximum=100, value=4, step=1, interactive=True, visible=False)
    strength = gr.Slider(label="Denoising Strength", minimum=0.00, maximum=1.00, value=0.70, step=0.01, interactive=True, visible=False)
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=8,
            lines=6,
            value="cartoon, 3D, disfigured, bad, art, deformed, extra limbs, weird, blurry, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn, hands, poorly drawn face, mutation, ugly, bad, anatomy, bad proportions, extra limbs, clone, clone-faced, cross proportions, missing arms, malformed limbs, missing legs, mutated, hands, fused fingers, too many fingers, photo shop, video game, ugly, tiling, cross-eye, mutation of eyes, long neck, bonnet, hat, beanie, cap, B&W",
            placeholder="Enter a negative prompt",
            visible=True,
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            visible=True,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=2048,
                step=8,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=2048,
                step=8,
                value=1024,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=20,
                step=0.1,
                value=5.5,
            )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
    model.change(
        fn=get_model,
        inputs=model,
        outputs=[md_mask, inpaint_image, mask_image, blur_factor, strength],
        api_name=False,
    )
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            model,
            inpaint_image,
            mask_image,
            blur_factor,
            strength,
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
        ],
        outputs=[result, seed],
        api_name="run",
    )
if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_api=False, debug=False)