Spaces:
vilarin
/
Running on Zero

vilarin committed on
Commit
a0f72b8
1 Parent(s): 187c32c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -35
app.py CHANGED
@@ -67,43 +67,19 @@ tokenizer_3 = AutoTokenizer.from_pretrained(
67
  torch_dtype=torch.float16,
68
  )
69
 
70
- torch.set_float32_matmul_precision("high")
71
-
72
- torch._inductor.config.conv_1x1_as_mm = True
73
- torch._inductor.config.coordinate_descent_tuning = True
74
- torch._inductor.config.epilogue_fusion = False
75
- torch._inductor.config.coordinate_descent_check_all_directions = True
76
 
77
  # Ensure model and scheduler are initialized in GPU-enabled function
78
- #if torch.cuda.is_available():
79
- pipe = StableDiffusion3Pipeline.from_pretrained(
80
- repo,
81
- tokenizer_3=tokenizer_3,
82
- text_encoder_3=text_encoder_3,
83
- torch_dtype=torch.float16).to("cuda")
84
- pipe2 = StableDiffusion3Img2ImgPipeline.from_pretrained(
85
- repo,
86
- tokenizer_3=tokenizer_3,
87
- text_encoder_3=text_encoder_3,
88
- torch_dtype=torch.float16).to("cuda")
89
-
90
- pipe.set_progress_bar_config(disable=True)
91
-
92
- pipe.transformer.to(memory_format=torch.channels_last)
93
- pipe.vae.to(memory_format=torch.channels_last)
94
-
95
- pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
96
- pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True)
97
-
98
-
99
- pipe2.set_progress_bar_config(disable=True)
100
-
101
- pipe2.transformer.to(memory_format=torch.channels_last)
102
- pipe2.vae.to(memory_format=torch.channels_last)
103
-
104
- pipe2.transformer = torch.compile(pipe2.transformer, mode="max-autotune", fullgraph=True)
105
- pipe2.vae.decode = torch.compile(pipe2.vae.decode, mode="max-autotune", fullgraph=True)
106
-
107
 
108
  pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
109
  pipe2.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe2.scheduler.config)
 
67
  torch_dtype=torch.float16,
68
  )
69
 
 
 
 
 
 
 
70
 
71
  # Ensure model and scheduler are initialized in GPU-enabled function
72
+ if torch.cuda.is_available():
73
+ pipe = StableDiffusion3Pipeline.from_pretrained(
74
+ repo,
75
+ tokenizer_3=tokenizer_3,
76
+ text_encoder_3=text_encoder_3,
77
+ torch_dtype=torch.float16).to("cuda")
78
+ pipe2 = StableDiffusion3Img2ImgPipeline.from_pretrained(
79
+ repo,
80
+ tokenizer_3=tokenizer_3,
81
+ text_encoder_3=text_encoder_3,
82
+ torch_dtype=torch.float16).to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
  pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
85
  pipe2.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe2.scheduler.config)