# Hugging Face Space (running on ZeroGPU): Stable Diffusion 3 Medium text-to-image demo
import os

# Enable hf_transfer for faster Hub downloads; set it before huggingface_hub
# (imported below by gradio and diffusers) reads its environment variables.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import gradio as gr
import torch
import numpy as np
import random
from diffusers import StableDiffusion3Pipeline, AutoencoderKL, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
import spaces
from PIL import Image
import requests
from transformers import T5EncoderModel
from translatepy import Translator
translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
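# "repo" is the diffusers-format checkpoint loaded below; "model" is not referenced elsewhere in this file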
model = "stabilityai/stable-diffusion-3-medium"
repo= "stabilityai/stable-diffusion-3-medium-diffusers"
MAX_SEED = np.iinfo(np.int32).max
CSS = """
.gradio-container {
max-width: 690px !important;
}
footer {
visibility: hidden;
}
"""
JS = """function () {
gradioURL = window.location.href
if (!gradioURL.endsWith('?__theme=dark')) {
window.location.replace(gradioURL + '?__theme=dark');
}
}"""
vae = AutoencoderKL.from_pretrained(
    repo,
    subfolder="vae",
    torch_dtype=torch.float16,
)
transformer = SD3Transformer2DModel.from_pretrained(
    repo,
    subfolder="transformer",
    torch_dtype=torch.float16,
)
text_encoder_3 = T5EncoderModel.from_pretrained(
    repo,
    subfolder="text_encoder_3",
    torch_dtype=torch.float16,
)
# Initialize the pipeline and scheduler when a GPU is available
if torch.cuda.is_available():
    pipe = StableDiffusion3Pipeline.from_pretrained(repo, vae=vae, transformer=transformer, text_encoder_3=text_encoder_3, torch_dtype=torch.float16).to("cuda")
    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
# Inference function (the @spaces.GPU decorator requests a ZeroGPU slot for each call)
@spaces.GPU()
def generate_image(
        prompt,
        negative="low quality",
        width=1024,
        height=1024,
        scale=1.5,
        steps=28,
        clip=3):
    # Draw a fresh random seed for every request
    seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Translate the prompt to English so non-English input works
    prompt = str(translator.translate(prompt, 'English'))
    print(f'prompt: {prompt}')
    image = pipe(
        prompt,
        negative_prompt=negative,
        width=width,
        height=height,
        guidance_scale=scale,
        num_inference_steps=steps,
        clip_skip=clip,
        generator=generator,
    )
    return image.images[0]
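
# Example (hypothetical) direct call outside the Gradio UI, assuming the pipeline loaded:
#   img = generate_image("a watercolor fox in the snow", steps=28, scale=5)
#   img.save("output.png")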
examples = [
    "a cat eating a piece of cheese",
    "a ROBOT riding a BLUE horse on Mars, photorealistic",
    "Ironman VS Hulk, ultrarealistic",
    "a CUTE robot artist painting on an easel",
    "Astronaut in a jungle, cold color palette, oil pastel, detailed, 8k",
    "An alien holding sign board contain word 'Flash', futuristic, neonpunk",
    "Kids going to school, Anime style"
]
# Gradio Interface
with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>SD3M🦄</center></h1>")
    gr.HTML("<p><center><a href='https://huggingface.co/stabilityai/stable-diffusion-3-medium'>sd3m</a> text-to-image generation</center><br><center>Prompts can be written in any language; the prompt box is pre-filled with quality tags to enhance results.</center></p>")
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(label='Enter Your Prompt', value="best quality, HD, aesthetic", scale=6)
            submit = gr.Button(scale=1, variant='primary')
        img = gr.Image(label='SD3M Generated Image')
    with gr.Accordion("Advanced Options", open=False):
        with gr.Row():
            negative = gr.Textbox(label="Negative prompt", value="low quality")
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=1280,
                step=8,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=1280,
                step=8,
                value=1024,
            )
        with gr.Row():
            scale = gr.Slider(
                label="Guidance",
                minimum=3.5,
                maximum=7,
                step=0.1,
                value=5,
            )
            steps = gr.Slider(
                label="Steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )
            clip = gr.Slider(
                label="Clip Skip",
                minimum=1,
                maximum=10,
                step=1,
                value=3,
            )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=img,
        fn=generate_image,
        cache_examples="lazy",
    )
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, negative, width, height, scale, steps, clip],
        outputs=img,
    )
    submit.click(
        fn=generate_image,
        inputs=[prompt, negative, width, height, scale, steps, clip],
        outputs=img,
    )
demo.queue().launch()