import gradio as gr
from diffusers import StableDiffusionPipeline
from PIL import Image


device="cpu"
model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"

# pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet).to(device)
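# Load the Taiyi Chinese Stable Diffusion text-to-image pipeline once at startup.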
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id).to(device)

def resize(w_val, l_val, img):
    # Helper for the (currently disabled) img2img path: load an image from
    # disk and resize it to the requested width and height.
    img = Image.open(img)
    img = img.resize((w_val, l_val), Image.Resampling.LANCZOS)
    return img


def infer(prompt, guide, steps, width, height):
    # Generate a single image for the prompt and return it for the gr.Image output.
    result = pipe_text2img([prompt], guidance_scale=guide, num_inference_steps=steps, width=width, height=height)
    return result.images[0]

# Build the Gradio UI: a prompt box plus sliders for guidance scale, steps, and output size.
gr.Interface(
    fn=infer,
    inputs=[
        # gr.Image(source="upload", type="filepath", label="Original image"),
        gr.Textbox(label="Prompt"),
        gr.Slider(2, 15, value=7, label="Guidance scale"),
        gr.Slider(10, 30, value=20, step=1, label="Inference steps"),
        gr.Slider(256, 768, value=512, step=64, label="Width"),
        gr.Slider(256, 768, value=512, step=64, label="Height"),
    ],
    outputs="image",
).queue(concurrency_count=10).launch()