import gradio as gr
import torch
from PIL import Image
import numpy as np
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline, UNet2DConditionModel, AutoencoderKL
from transformers import BertTokenizer, BertModel
device = "cuda"
model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
# pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet).to(device)
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id).to(device)
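# The commented-out img2img pipeline above expects tokenizer / text_encoder /
# vae / unet objects, which is why BertTokenizer, BertModel, AutoencoderKL and
# UNet2DConditionModel are imported. A minimal sketch of loading them from the
# same checkpoint (assuming the standard diffusers subfolder layout) would be:
#
#   tokenizer = BertTokenizer.from_pretrained(model_id, subfolder="tokenizer")
#   text_encoder = BertModel.from_pretrained(model_id, subfolder="text_encoder")
#   vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae")
#   unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")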
def resize(w_val, l_val, img):
    # Helper for the (currently disabled) img2img input: open the uploaded
    # file and resize it to the requested width/height.
    img = Image.open(img)
    img = img.resize((w_val, l_val), Image.Resampling.LANCZOS)
    return img
def infer(prompt, guide, steps, width, height):
    # Run the text-to-image pipeline and return the first generated image.
    output = pipe_text2img([prompt], guidance_scale=guide, num_inference_steps=steps, width=width, height=height)
    # Recent diffusers releases expose results as `.images`; older ones used output["sample"].
    return output.images[0]
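# A quick local sanity check (assuming a CUDA device and an example Chinese
# prompt, since Taiyi expects Chinese text) could call infer() directly:
#
#   img = infer("飞流直下三千尺，油画", guide=7, steps=20, width=512, height=512)  # "a waterfall plunging three thousand feet, oil painting"
#   img.save("sample.png")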
gr.Interface(
    fn=infer,
    inputs=[
        # gr.Image(source="upload", type="filepath", label="原始图像"),  # source image (img2img, disabled)
        gr.Textbox(label='提示词(prompt)'),
        gr.Slider(2, 15, value=7, label='文本引导强度(guidance scale)'),
        gr.Slider(10, 30, value=20, step=1, label='迭代次数(inference steps)'),
        gr.Slider(256, 768, value=512, step=64, label='宽度(width)'),
        gr.Slider(256, 768, value=512, step=64, label='高度(height)'),
    ],
    outputs='image',
).queue(max_size=10).launch()  # .queue() already enables queuing, so launch() needs no extra flag