multimodalart (HF staff) committed
Commit 06f0278
1 Parent(s): 2b62414

Update app.py

Files changed (1): app.py (+29 -11)
app.py CHANGED
@@ -2,18 +2,36 @@ import gradio as gr
 import numpy as np
 import random
 import spaces
-from diffusers import AuraFlowPipeline
 import torch
+from diffusers import FluxPipeline, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler, AutoencoderKL
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+
+dtype = torch.bfloat16
+device = "cuda"
+
+sd3_repo = "stabilityai/stable-diffusion-3-medium-diffusers"
+scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(sd3_repo, subfolder="scheduler")
+text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+text_encoder_2 = T5EncoderModel.from_pretrained(sd3_repo, subfolder="text_encoder_3", torch_dtype=dtype)
+tokenizer_2 = T5TokenizerFast.from_pretrained(sd3_repo, subfolder="tokenizer_3", torch_dtype=dtype)
+vae = AutoencoderKL.from_pretrained("diffusers-internal-dev/FLUX.1-schnell", subfolder="vae", torch_dtype=dtype)
+transformer = FluxTransformer2DModel.from_pretrained("diffusers-internal-dev/FLUX.1-schnell", subfolder="transformer", torch_dtype=dtype)
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = AuraFlowPipeline.from_pretrained(
-    "fal/AuraFlow-v0.2",
-    torch_dtype=torch.float16
+pipe = FluxPipeline(
+    scheduler=scheduler,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    text_encoder_2=text_encoder_2,
+    tokenizer_2=tokenizer_2,
+    vae=vae,
+    transformer=transformer,
 ).to("cuda")
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
+MAX_IMAGE_SIZE = 2048
 
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
@@ -23,15 +41,15 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         width = width,
         height = height,
         num_inference_steps = num_inference_steps,
-        generator = generator
+        generator = generator,
+        guidance_scale=0.0
     ).images[0]
     return image, seed
 
 examples = [
-    "A photo of a lavender cat",
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
+    "a tiny astronaut hatching from an egg on the moon",
+    "a cat holding a sign that says hello world",
+    "an anime illustration of a wiener schnitzel",
 ]
 
 css="""
@@ -46,7 +64,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
         # FLUX.1 Schnell
-        Demo of the [FLUX.1 Schnell](https://huggingface.co/fal/AuraFlow) 12B parameters rectified flow transformer distilled from [FLUX.1 Pro](https://blackforestlabs.ai/) for fast generation in 4 steps
+        [FLUX.1 Schnell](https://huggingface.co/black-forest-labs/FLUX.1-schnell) demo 12B parameters rectified flow transformer distilled from [FLUX.1 Pro](https://blackforestlabs.ai/) for fast generation in 4 steps
         [[blog](https://blackforestlabs.ai/2024/07/31/announcing-black-forest-labs/)] [[model](https://black-forest-labs/FLUX.1-schnell)]]
         """)
 
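For reference, a minimal sketch of how the rebuilt pipeline is driven. The second hunk shows only part of `infer`, so the seed handling and the surrounding `pipe(...)` call below are reconstructed from the visible parameters (an assumption, not the committed code), and loading via `FluxPipeline.from_pretrained` on the public `black-forest-labs/FLUX.1-schnell` checkpoint stands in for the per-component assembly in this commit:

```python
import random

import numpy as np
import torch
from diffusers import FluxPipeline

MAX_SEED = np.iinfo(np.int32).max

# Stand-in for the per-component assembly in the commit: load the same
# pipeline class directly from the public FLUX.1 Schnell checkpoint.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024,
          num_inference_steps=4):
    # Optionally draw a fresh seed, then seed a Generator so a given
    # (prompt, seed) pair is reproducible. This block is an assumption;
    # it is not visible in the diff hunks above.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # guidance_scale=0.0 mirrors the value added in this commit; the
    # distilled Schnell model is meant to run without guidance in ~4 steps.
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0,
    ).images[0]
    return image, seed
```

The sketch omits the `@spaces.GPU()` decorator and Gradio progress argument from the Space; it only illustrates the pipeline call that the new hunk parameterizes.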