Spaces:
Runtime error
Runtime error
File size: 1,486 Bytes
1ec3b8e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
import gradio as gr
from PIL import Image
import numpy as np
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline, UNet2DConditionModel, AutoencoderKL
from transformers import BertTokenizer, BertModel
# Inference device; assumes a CUDA-capable GPU is available at startup.
device="cuda"
# Taiyi 1B: a Chinese-language Stable Diffusion checkpoint from IDEA-CCNL.
model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
# pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet).to(device)
# Text-to-image pipeline, loaded once at import time and shared by infer().
# NOTE(review): downloads the model on first run; no revision is pinned.
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id).to(device)
def resize(w_val, l_val, img):
    """Load an image and return a copy resized to (w_val, l_val).

    Parameters:
        w_val: target width in pixels.
        l_val: target height in pixels.
        img: file path or file-like object accepted by ``Image.open``.

    Returns a new PIL ``Image`` resampled with LANCZOS.
    """
    # The original called Image.open() without ever closing the underlying
    # file handle; the context manager fixes that leak. resize() forces a
    # full pixel load, so the returned copy survives closing the source.
    with Image.open(img) as source:
        return source.resize((w_val, l_val), Image.Resampling.LANCZOS)
def infer(prompt, guide, steps, width, height):
    """Generate one image from a Chinese text prompt.

    Parameters:
        prompt: text prompt fed to the pipeline.
        guide: classifier-free guidance scale.
        steps: number of denoising iterations.
        width, height: output resolution in pixels.

    Returns the last PIL image produced by the pipeline (with a single
    prompt, the only one).
    """
    result = pipe_text2img(
        [prompt],
        guidance_scale=guide,
        num_inference_steps=steps,
        width=width,
        height=height,
    )
    # NOTE(review): the "sample" key is the output format of older diffusers
    # releases; current versions expose `.images` — confirm the pinned
    # diffusers version before changing this.
    samples = result["sample"]
    # The original accumulated every sample into an unused list and returned
    # only the final loop variable; this returns the same image directly.
    return samples[-1]
# Build and launch the Gradio UI. The label strings below appear to be
# mojibake of the original Chinese text (scraping artifact) — kept
# byte-for-byte because they are runtime values, not comments.
gr.Interface(
    fn=infer,
    inputs=[
        # gr.Image(source="upload", type="filepath", label="ๅๅงๅพๅ"),
        gr.Textbox(label='ๆ็คบ่ฏ(prompt)'),
        gr.Slider(2, 15, value=7, label='ๆๆฌๅผๅฏผๅผบๅบฆ'),
        gr.Slider(10, 30, value=20, step=1, label='่ฟญไปฃๆฌกๆฐ'),
        gr.Slider(256, 768, value=512, step=64, label='ๅฎฝๅบฆ'),
        gr.Slider(256, 768, value=512, step=64, label='้ซๅบฆ'),
    ],
    outputs='image',
).queue(max_size=10).launch()
# launch(enable_queue=True) was removed: .queue() above already enables the
# request queue, and the keyword is deprecated/removed in modern Gradio.