import gradio as gr
import modin.pandas as pd
import torch
import time
from optimum.intel import OVStableDiffusionImg2ImgPipeline
import numpy as np
from PIL import Image
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image
import math
from DeepCache import DeepCacheSDHelper

adapter_id = "latent-consistency/lcm-lora-sdv1-5"
device = "cuda" if torch.cuda.is_available() else "cpu"
# helper = DeepCacheSDHelper(pipe=pipe)
# helper.set_params(
#     cache_interval=3,
#     cache_branch_id=0,
# )
# helper.enable()
# pipe.compile()
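# Resize the source image so that neither side exceeds target_size while preserving its aspect ratio.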
def resize(target_size, source):
    original_width, original_height = source.size
    aspect_ratio = original_height / original_width
    # Compute the new height that preserves the aspect ratio, assuming the width is fixed to target_size first
    new_width = target_size
    new_height = int(new_width * aspect_ratio)
    # If the new height exceeds the target size, recompute the width to keep the target height instead
    if new_height > target_size:
        new_height = target_size
        new_width = int(new_height / aspect_ratio)
    print("Original height/width, aspect ratio, new height:", original_height, original_width, aspect_ratio, new_height)
    # Scale the image proportionally
    # resized_image = Image.fromarray(image_array).resize((new_width, new_height), resample=Image.LANCZOS)
    resized_image = source.resize((new_width, new_height))
    return resized_image
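# Run a single image-to-image generation for the given checkpoint, source image, prompt, step count, seed, and strength, and report how long it took.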
def infer(model_id, source_img, prompt, steps, seed, Strength):
    # Use the OpenVINO img2img pipeline when CUDA is available, otherwise fall back to SDXL-Turbo on CPU
    pipe = OVStableDiffusionImg2ImgPipeline.from_pretrained(model_id, torch_dtype=torch.float16, export=True) if torch.cuda.is_available() else AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo")
    pipe = pipe.to(device)
    # pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
    # tcd_lora_id = "h1t/TCD-SDXL-LoRA"
    # pipe.load_lora_weights(tcd_lora_id)
    # pipe.fuse_lora()
    start_time = time.time()
    generator = torch.Generator(device).manual_seed(int(seed))
    # Ensure at least one denoising step survives the strength scaling
    if int(steps * Strength) < 1:
        steps = math.ceil(1 / max(0.10, Strength))
    # The Gradio image input is a file path, so load it into a PIL image before resizing
    source_image = resize(512, load_image(source_img))
    source_image.save('source.png')
    image = pipe(prompt, image=source_image, strength=Strength, guidance_scale=0.0, num_inference_steps=steps, generator=generator).images[0]
    end_time = time.time()
    elapsed_time = end_time - start_time
    print("Generation time:", elapsed_time)
    return image
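# Gradio front end: checkpoint name, source image, prompt, iteration count, seed, and strength in; the generated image out.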
gr.Interface(fn=infer, inputs=[
    gr.Text(value="IDKiro/sdxs-512-dreamshaper", label="Checkpoint"),
    gr.Image(sources=["upload", "webcam", "clipboard"], type="filepath", label="Raw Image."),
    gr.Textbox(label='Prompt Input Text. 77 Token (Keyword or Symbol) Maximum'),
    gr.Slider(1, 5, value=2, step=1, label='Number of Iterations'),
    gr.Slider(label="Seed", minimum=0, maximum=987654321987654321, step=1, randomize=True),
    gr.Slider(label='Strength', minimum=0.1, maximum=1, step=.05, value=.5)],
    outputs='image', title="Stable Diffusion XL Turbo Image to Image Pipeline CPU",
    description="For more information on Stable Diffusion XL Turbo see https://huggingface.co/stabilityai/sdxl-turbo <br><br>Upload an Image, Use your Cam, or Paste an Image. Then enter a Prompt, or let it just do its Thing, then click submit. For more information about Stable Diffusion or Suggestions for prompts, keywords, artists or styles see https://github.com/Maks-s/sd-akashic",
    article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").queue(max_size=10).launch()