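# Gradio demo: text-to-image generation with the Taiyi Chinese Stable Diffusion
# checkpoint loaded below via diffusers.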
import gradio as gr
from diffusers import StableDiffusionPipeline
from PIL import Image  # needed by resize() below


device = "cuda"
model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"

# pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet).to(device)
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id).to(device)

def resize(w_val, l_val, img):
    # Resize an uploaded image file to the requested width/height.
    img = Image.open(img)
    img = img.resize((w_val, l_val), Image.Resampling.LANCZOS)
    return img


def infer(prompt, guide, steps, width, height):
    output = pipe_text2img(prompt, guidance_scale=guide, num_inference_steps=steps, width=width, height=height)
    image = output.images[0]
    return image
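
# Sketch of an img2img variant (assumption: the same checkpoint also works with
# StableDiffusionImg2ImgPipeline, as hinted by the commented-out line above).
# To use it, re-enable that pipeline import and the gr.Image input below.
#
# pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(model_id).to(device)
#
# def infer_img2img(prompt, init_img, guide, steps, width, height):
#     init_image = resize(width, height, init_img)
#     output = pipe_img2img(prompt, image=init_image,
#                           guidance_scale=guide, num_inference_steps=steps)
#     return output.images[0]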

gr.Interface(
    fn=infer,
    inputs=[
        # gr.Image(source="upload", type="filepath", label="Initial image"),
        gr.Textbox(label="Prompt"),
        gr.Slider(2, 15, value=7, label="Guidance scale"),
        gr.Slider(10, 30, value=20, step=1, label="Inference steps"),
        gr.Slider(256, 768, value=512, step=64, label="Width"),
        gr.Slider(256, 768, value=512, step=64, label="Height"),
    ],
    outputs="image",
).queue(concurrency_count=10).launch()