nick911 committed on
Commit
d18ab7c
1 Parent(s): 185a961

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -8
app.py CHANGED
@@ -3,26 +3,29 @@ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDisc
3
  from huggingface_hub import hf_hub_download
4
  from safetensors.torch import load_file
5
  import gradio as gr
 
 
6
 
7
  base = "stabilityai/stable-diffusion-xl-base-1.0"
8
  repo = "ByteDance/SDXL-Lightning"
9
  ckpt = "sdxl_lightning_1step_unet_x0.safetensors" # Use the correct ckpt for your step setting!
10
 
11
  # Load model.
12
-
13
- def generate():
14
  unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
15
  unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
16
  pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
17
  # Ensure sampler uses "trailing" timesteps and "sample" prediction type.
18
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
19
-
20
- # Ensure using the same inference steps as the loaded model and CFG set to 0.
21
-
22
-
23
- def greet(prompt):
24
  image = pipe(prompt, num_inference_steps=1, guidance_scale=0).images[0].save("output.png")
25
  return image
 
 
 
 
 
 
26
 
27
- demo = gr.Interface(fn=greet, inputs="text", outputs="image")
28
  demo.launch()
 
3
  from huggingface_hub import hf_hub_download
4
  from safetensors.torch import load_file
5
  import gradio as gr
6
+ import spaces
7
+
8
 
9
  base = "stabilityai/stable-diffusion-xl-base-1.0"
10
  repo = "ByteDance/SDXL-Lightning"
11
  ckpt = "sdxl_lightning_1step_unet_x0.safetensors" # Use the correct ckpt for your step setting!
12
 
13
  # Load model.
14
@spaces.GPU
def generate(prompt):
    """Generate one image from *prompt* with SDXL-Lightning's 1-step UNet.

    Runs on a ZeroGPU worker via the ``@spaces.GPU`` decorator.

    NOTE(review): the UNet and pipeline are rebuilt on every call; loading
    them once at module level would avoid the per-request download/load cost
    — confirm whether ZeroGPU constraints forced this placement.

    Args:
        prompt: text prompt passed straight to the SDXL pipeline.

    Returns:
        The generated ``PIL.Image.Image`` (also written to ``output.png``).
    """
    # Build the base SDXL UNet from config, then swap in the Lightning
    # distilled weights downloaded from the hub.
    unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
    unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
    pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
    # Ensure sampler uses "trailing" timesteps and "sample" prediction type.
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
    # Ensure using the same inference steps as the loaded model and CFG set to 0.
    image = pipe(prompt, num_inference_steps=1, guidance_scale=0).images[0]
    # Bug fix: Image.save() returns None, so chaining `.save(...)` made the
    # original return None to Gradio. Save as a side effect, return the image.
    image.save("output.png")
    return image
 
30
# Wire the generator into a minimal text-in / image-out Gradio UI and
# start the web server (blocks until the app is stopped).
demo = gr.Interface(fn=generate, inputs="text", outputs="image")
demo.launch()