prithivMLmods committed
Commit
f674364
1 Parent(s): 752fcb0

Create app.py

Files changed (1)
  1. app.py +724 -0
app.py ADDED
@@ -0,0 +1,724 @@
import os
import json
import copy
import time
import random
import logging
import numpy as np
from typing import Any, Dict, List, Optional, Union

import torch
from PIL import Image
import gradio as gr

from diffusers import (
    DiffusionPipeline,
    AutoencoderTiny,
    AutoencoderKL,
    AutoPipelineForImage2Image,
    FluxPipeline,
    FlowMatchEulerDiscreteScheduler)
from diffusers.utils import load_image  # needed by generate_image_to_image below

from huggingface_hub import (
    hf_hub_download,
    HfFileSystem,
    ModelCard,
    snapshot_download)

import spaces

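# Linearly interpolate the scheduler shift ("mu") between base_shift and max_shift
# based on the latent sequence length; this is the value later passed to
# retrieve_timesteps() for dynamic shifting.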
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu

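# Configure the scheduler from either custom timesteps, custom sigmas, or a plain step
# count, and return the resulting timesteps together with the effective number of steps.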
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

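# Streaming variant of the FLUX text-to-image call: after every denoising step the
# intermediate latents are decoded with the lightweight preview VAE attached to the
# pipeline and yielded to the UI; the final latents are decoded once more with the
# full-quality VAE ("good_vae") for the last yielded image.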
# FLUX pipeline
@torch.inference_mode()
def flux_pipe_call_that_returns_an_iterable_of_images(
    self,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 28,
    timesteps: List[int] = None,
    guidance_scale: float = 3.5,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    max_sequence_length: int = 512,
    good_vae: Optional[Any] = None,
):
    height = height or self.default_sample_size * self.vae_scale_factor
    width = width or self.default_sample_size * self.vae_scale_factor

    self.check_inputs(
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        max_sequence_length=max_sequence_length,
    )

    self._guidance_scale = guidance_scale
    self._joint_attention_kwargs = joint_attention_kwargs
    self._interrupt = False

    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
        prompt=prompt,
        prompt_2=prompt_2,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        max_sequence_length=max_sequence_length,
        lora_scale=lora_scale,
    )

    num_channels_latents = self.transformer.config.in_channels // 4
    latents, latent_image_ids = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
    image_seq_len = latents.shape[1]
    mu = calculate_shift(
        image_seq_len,
        self.scheduler.config.base_image_seq_len,
        self.scheduler.config.max_image_seq_len,
        self.scheduler.config.base_shift,
        self.scheduler.config.max_shift,
    )
    timesteps, num_inference_steps = retrieve_timesteps(
        self.scheduler,
        num_inference_steps,
        device,
        timesteps,
        sigmas,
        mu=mu,
    )
    self._num_timesteps = len(timesteps)

    guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None

    for i, t in enumerate(timesteps):
        if self.interrupt:
            continue

        timestep = t.expand(latents.shape[0]).to(latents.dtype)

        noise_pred = self.transformer(
            hidden_states=latents,
            timestep=timestep / 1000,
            guidance=guidance,
            pooled_projections=pooled_prompt_embeds,
            encoder_hidden_states=prompt_embeds,
            txt_ids=text_ids,
            img_ids=latent_image_ids,
            joint_attention_kwargs=self.joint_attention_kwargs,
            return_dict=False,
        )[0]

        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
        image = self.vae.decode(latents_for_image, return_dict=False)[0]
        yield self.image_processor.postprocess(image, output_type=output_type)[0]
        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
        torch.cuda.empty_cache()

    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
    image = good_vae.decode(latents, return_dict=False)[0]
    self.maybe_free_model_hooks()
    torch.cuda.empty_cache()
    yield self.image_processor.postprocess(image, output_type=output_type)[0]

#-----------------------------------------------------------------------------------LoRA's--------------------------------------------------------------------------#
loras = [
    #1
    {
        "image": "https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-FaceRealism/resolve/main/images/11.png",
        "title": "Flux Face Realism",
        "repo": "prithivMLmods/Canopus-LoRA-Flux-FaceRealism",
        "trigger_word": "Realism"
    },
    #2
    {
        "image": "https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00134_.png",
        "title": "Softserve Anime",
        "repo": "alvdansen/softserve_anime",
        "trigger_word": "sftsrv style illustration"
    },
    #3
    {
        "image": "https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-Anime/resolve/main/assets/4.png",
        "title": "Flux Anime",
        "repo": "prithivMLmods/Canopus-LoRA-Flux-Anime",
        "trigger_word": "Anime"
    },
    #4
    {
        "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-One-Click-Creative-Template/resolve/main/images/f2cc649985648e57b9b9b14ca7a8744ac8e50d75b3a334ed4df0f368.jpg",
        "title": "Creative Template",
        "repo": "Shakker-Labs/FLUX.1-dev-LoRA-One-Click-Creative-Template",
        "trigger_word": "The background is 4 real photos, and in the middle is a cartoon picture summarizing the real photos."
    },
    #5
    {
        "image": "https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0/resolve/main/images/3.png",
        "title": "Ultra Realism",
        "repo": "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
        "trigger_word": "Ultra realistic"
    },
    #6
    {
        "image": "https://huggingface.co/gokaygokay/Flux-Game-Assets-LoRA-v2/resolve/main/images/example_y2bqpuphc.png",
        "title": "Game Assets",
        "repo": "gokaygokay/Flux-Game-Assets-LoRA-v2",
        "trigger_word": "wbgmsst, white background"
    },
    #7
    {
        "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
        "title": "Softpasty",
        "repo": "alvdansen/softpasty-flux-dev",
        "trigger_word": "araminta_illus illustration style"
    },
    #8
    {
        "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-add-details/resolve/main/images/0.png",
        "title": "Details Add",
        "repo": "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
        "trigger_word": ""
    },
    #9
    {
        "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T010011.238.jpeg",
        "title": "Frosting Lane",
        "repo": "alvdansen/frosting_lane_flux",
        "trigger_word": "frstingln illustration"
    },
    #10
    {
        "image": "https://huggingface.co/aleksa-codes/flux-ghibsky-illustration/resolve/main/images/example5.jpg",
        "title": "Ghibsky Illustration",
        "repo": "aleksa-codes/flux-ghibsky-illustration",
        "trigger_word": "GHIBSKY style painting"
    },
    #11
    {
        "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-Dark-Fantasy/resolve/main/images/c2215bd73da9f14fcd63cc93350e66e2901bdafa6fb8abaaa2c32a1b.jpg",
        "title": "Dark Fantasy",
        "repo": "Shakker-Labs/FLUX.1-dev-LoRA-Dark-Fantasy",
        "trigger_word": ""
    },
    #12
    {
        "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/d13591878d5043f3989dd6eb1c25b710_233c18effb4b491cb467ca31c97e90b5.png",
        "title": "Paper Cutout",
        "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
        "trigger_word": "Paper Cutout Style"
    },
    #13
    {
        "image": "https://huggingface.co/alvdansen/mooniverse/resolve/main/images/out-0%20(17).webp",
        "title": "Mooniverse",
        "repo": "alvdansen/mooniverse",
        "trigger_word": "surreal style"
    },
    #14
    {
        "image": "https://huggingface.co/alvdansen/pola-photo-flux/resolve/main/images/out-0%20-%202024-09-22T130819.351.webp",
        "title": "Pola Photo",
        "repo": "alvdansen/pola-photo-flux",
        "trigger_word": "polaroid style"
    },
    #15
    {
        "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/7e180627edd846e899b6cd307339140d_5b2a09f0842c476b83b6bd2cb9143a52.png",
        "title": "Flux Tarot",
        "repo": "multimodalart/flux-tarot-v1",
        "trigger_word": "in the style of TOK a trtcrd tarot style"
    },
    #16
    {
        "image": "https://huggingface.co/prithivMLmods/Flux-Dev-Real-Anime-LoRA/resolve/main/images/111.png",
        "title": "Real Anime",
        "repo": "prithivMLmods/Flux-Dev-Real-Anime-LoRA",
        "trigger_word": "Real Anime"
    },
    #17
    {
        "image": "https://huggingface.co/diabolic6045/Flux_Sticker_Lora/resolve/main/images/example_s3pxsewcb.png",
        "title": "Stickers",
        "repo": "diabolic6045/Flux_Sticker_Lora",
        "trigger_word": "5t1cker 5ty1e"
    },
    #18
    {
        "image": "https://huggingface.co/VideoAditor/Flux-Lora-Realism/resolve/main/images/feel-the-difference-between-using-flux-with-lora-from-xlab-v0-j0ehybmvxehd1.png",
        "title": "Realism",
        "repo": "XLabs-AI/flux-RealismLora",
        "trigger_word": ""
    },
    #19
    {
        "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00583_%20(1).png",
        "title": "Koda",
        "repo": "alvdansen/flux-koda",
        "trigger_word": "flmft style"
    },
    #20
    {
        "image": "https://huggingface.co/mgwr/Cine-Aesthetic/resolve/main/images/00019-1333633802.png",
        "title": "Cine Aesthetic",
        "repo": "mgwr/Cine-Aesthetic",
        "trigger_word": "mgwr/cine"
    },
    #21
    {
        "image": "https://huggingface.co/SebastianBodza/flux_cute3D/resolve/main/images/astronaut.webp",
        "title": "Cute 3D",
        "repo": "SebastianBodza/flux_cute3D",
        "trigger_word": "NEOCUTE3D"
    },
    #22
    {
        "image": "https://huggingface.co/bingbangboom/flux_dreamscape/resolve/main/images/3.jpg",
        "title": "Dreamscape",
        "repo": "bingbangboom/flux_dreamscape",
        "trigger_word": "in the style of BSstyle004"
    },
    #23
    {
        "image": "https://huggingface.co/prithivMLmods/Canopus-Cute-Kawaii-Flux-LoRA/resolve/main/images/11.png",
        "title": "Cute Kawaii",
        "repo": "prithivMLmods/Canopus-Cute-Kawaii-Flux-LoRA",
        "trigger_word": "cute-kawaii"
    },
    #24
    {
        "image": "https://cdn-uploads.huggingface.co/production/uploads/64b24543eec33e27dc9a6eca/_jyra-jKP_prXhzxYkg1O.png",
        "title": "Pastel Anime",
        "repo": "Raelina/Flux-Pastel-Anime",
        "trigger_word": "Anime"
    },
    #25
    {
        "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-Vector-Journey/resolve/main/images/f7a66b51c89896854f31bef743dc30f33c6ea3c0ed8f9ff04d24b702.jpg",
        "title": "Vector",
        "repo": "Shakker-Labs/FLUX.1-dev-LoRA-Vector-Journey",
        "trigger_word": "artistic style blends reality and illustration elements"
    },
    #26
    {
        "image": "https://huggingface.co/bingbangboom/flux-miniature-worlds/resolve/main/images/2.jpg",
        "title": "Miniature",
        "repo": "bingbangboom/flux-miniature-worlds",
        "trigger_word": "Image in the style of MNTRWRLDS"
    },
    #27
    {
        "image": "https://huggingface.co/glif-loradex-trainer/bingbangboom_flux_surf/resolve/main/samples/1729012111574__000002000_0.jpg",
        "title": "Surf Bingbangboom",
        "repo": "glif-loradex-trainer/bingbangboom_flux_surf",
        "trigger_word": "SRFNGV01"
    },
    #28
    {
        "image": "https://huggingface.co/prithivMLmods/Canopus-Snoopy-Charlie-Brown-Flux-LoRA/resolve/main/000.png",
        "title": "Snoopy Charlie",
        "repo": "prithivMLmods/Canopus-Snoopy-Charlie-Brown-Flux-LoRA",
        "trigger_word": "Snoopy Charlie Brown"
    },
    #29
    {
        "image": "https://huggingface.co/alvdansen/sonny-anime-fixed/resolve/main/images/uqAuIMqA6Z7mvPkHg4qJE_f4c3cbe64e0349e7b946d02adeacdca3.png",
        "title": "Fixed Sonny",
        "repo": "alvdansen/sonny-anime-fixed",
        "trigger_word": "nm22 style"
    },
    #30
    {
        "image": "https://huggingface.co/davisbro/flux-multi-angle/resolve/main/multi-angle-examples/3.png",
        "title": "Multi Angle",
        "repo": "davisbro/flux-multi-angle",
        "trigger_word": "in the style of TOK"
    }
    # Add new LoRAs below ↓ (add a comma after the entry above first)
]

#--------------------------------------------------Model Initialization-----------------------------------------------------------------------------------------#

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

# TAEF1 is a very tiny autoencoder that uses the same "latent API" as FLUX.1's VAE;
# it is useful for real-time previewing of the FLUX.1 generation process.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
    vae=good_vae,
    transformer=pipe.transformer,
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    torch_dtype=dtype
)

MAX_SEED = 2**32-1

# Bind the streaming generator defined above as a method on the text-to-image pipeline.
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)

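# Simple context manager that prints how long the wrapped block took; used below to
# log LoRA load/unload and image-generation times.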
class calculateDuration:
    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = time.time()
        self.elapsed_time = self.end_time - self.start_time
        if self.activity_name:
            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
        else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")

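# Gallery selection callback: updates the prompt placeholder, the "Selected" markdown,
# the stored LoRA index, and (when the LoRA entry defines an "aspect") the size sliders.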
def update_selection(evt: gr.SelectData, width, height):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
    if "aspect" in selected_lora:
        if selected_lora["aspect"] == "portrait":
            width = 768
            height = 1024
        elif selected_lora["aspect"] == "landscape":
            width = 1024
            height = 768
        else:
            width = 1024
            height = 1024
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index,
        width,
        height,
    )

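# Text-to-image path: runs on ZeroGPU and yields intermediate previews from the
# streaming pipeline call so the UI can show progress while denoising.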
@spaces.GPU(duration=70)
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    with calculateDuration("Generating image"):
        # Generate image
        for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
            prompt=prompt_mash,
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator,
            joint_attention_kwargs={"scale": lora_scale},
            output_type="pil",
            good_vae=good_vae,
        ):
            yield img

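# Image-to-image path: uses the AutoPipelineForImage2Image (with the full-quality VAE)
# and returns a single final image instead of streaming previews.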
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
    generator = torch.Generator(device="cuda").manual_seed(seed)
    pipe_i2i.to("cuda")
    image_input = load_image(image_input_path)
    final_image = pipe_i2i(
        prompt=prompt_mash,
        image=image_input,
        strength=image_strength,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
        output_type="pil",
    ).images[0]
    return final_image

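# Main entry point wired to the Generate button: assembles the prompt with the LoRA's
# trigger word, hot-swaps the selected LoRA weights onto the appropriate pipeline, then
# dispatches to the image-to-image or the streaming text-to-image path.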
@spaces.GPU(duration=70)
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")
    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]
    if trigger_word:
        if "trigger_position" in selected_lora:
            if selected_lora["trigger_position"] == "prepend":
                prompt_mash = f"{trigger_word} {prompt}"
            else:
                prompt_mash = f"{prompt} {trigger_word}"
        else:
            prompt_mash = f"{trigger_word} {prompt}"
    else:
        prompt_mash = prompt

    with calculateDuration("Unloading LoRA"):
        pipe.unload_lora_weights()
        pipe_i2i.unload_lora_weights()

    # LoRA weights flow
    with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
        pipe_to_use = pipe_i2i if image_input is not None else pipe
        weight_name = selected_lora.get("weights", None)

        pipe_to_use.load_lora_weights(
            lora_path,
            weight_name=weight_name,
            low_cpu_mem_usage=True
        )

    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

    if image_input is not None:
        final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
        yield final_image, seed, gr.update(visible=False)
    else:
        image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)

        final_image = None
        step_counter = 0
        for image in image_generator:
            step_counter += 1
            final_image = image
            progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
            yield image, seed, gr.update(value=progress_bar, visible=True)

        yield final_image, seed, gr.update(value=progress_bar, visible=False)

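# Resolve a custom LoRA given a Hugging Face "user/repo" id: read the model card to
# validate the base model and pick up a preview image and trigger word, then scan the
# repo for the *.safetensors weight file.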
def get_huggingface_safetensors(link):
    split_link = link.split("/")
    if len(split_link) == 2:
        model_card = ModelCard.load(link)
        base_model = model_card.data.get("base_model")
        print(base_model)

        if (base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell"):
            raise Exception("Flux LoRA Not Found!")
        # Only allow "black-forest-labs/FLUX.1-dev"
        #if base_model != "black-forest-labs/FLUX.1-dev":
            #raise Exception("Only FLUX.1-dev is supported, other LoRA models are not allowed!")

        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
        trigger_word = model_card.data.get("instance_prompt", "")
        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
        fs = HfFileSystem()
        try:
            list_of_files = fs.ls(link, detail=False)
            for file in list_of_files:
                if file.endswith(".safetensors"):
                    safetensors_name = file.split("/")[-1]
                if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
                    image_elements = file.split("/")
                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
        except Exception as e:
            print(e)
            gr.Warning("You didn't provide a valid Hugging Face repository with a *.safetensors LoRA")
            raise Exception("You didn't provide a valid Hugging Face repository with a *.safetensors LoRA")
        return split_link[1], link, safetensors_name, trigger_word, image_url

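# Accept either a bare "user/repo" id or a full huggingface.co URL and normalize it
# before resolving the LoRA metadata.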
def check_custom_model(link):
    if link.startswith("https://"):
        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
    else:
        return get_huggingface_safetensors(link)

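# Callback for the custom-LoRA textbox: resolves the entered repo/URL, renders a small
# info card, registers the LoRA in the gallery list, and pre-selects it.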
def add_custom_lora(custom_lora):
    global loras
    if custom_lora:
        try:
            title, repo, path, trigger_word, image = check_custom_model(custom_lora)
            print(f"Loaded custom LoRA: {repo}")
            card = f'''
            <div class="custom_lora_card">
                <span>Loaded custom LoRA:</span>
                <div class="card_internal">
                    <img src="{image}" />
                    <div>
                        <h3>{title}</h3>
                        <small>{"Using: <code><b>"+trigger_word+"</b></code> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
                    </div>
                </div>
            </div>
            '''
            existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:
                new_item = {
                    "image": image,
                    "title": title,
                    "repo": repo,
                    "weights": path,
                    "trigger_word": trigger_word
                }
                print(new_item)
                existing_item_index = len(loras)
                loras.append(new_item)

            return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
        except Exception as e:
            gr.Warning("Invalid LoRA: either you entered an invalid link or a non-FLUX LoRA")
            return gr.update(visible=True, value="Invalid LoRA: either you entered an invalid link or a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
    else:
        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""

def remove_custom_lora():
    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""

run_lora.zerogpu = True

css = '''
#gen_btn{height: 100%}
#gen_column{align-self: stretch}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
.card_internal{display: flex;height: 100px;margin-top: .5em}
.card_internal img{margin-right: 1em}
.styler{--form-gap-width: 0px !important}
#progress{height:30px}
#progress .generating{display:none}
.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
'''

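# UI layout: a prompt row, the LoRA gallery with a custom-LoRA textbox, the result image
# with an HTML progress bar, and an "Advanced Settings" accordion (img2img input, CFG,
# steps, size, seed, LoRA scale). The event handlers below wire the gallery, the custom
# LoRA textbox, and the Generate button to the functions defined above.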
with gr.Blocks(theme="prithivMLmods/Minecraft-Theme", css=css, delete_cache=(60, 3600)) as app:
    title = gr.HTML(
        """<h1>FLUX LoRA DLC🥳</h1>""",
        elem_id="title",
    )
    selected_index = gr.State(None)
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Choose the LoRA and type the prompt")
        with gr.Column(scale=1, elem_id="gen_column"):
            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
    with gr.Row():
        with gr.Column():
            selected_info = gr.Markdown("")
            gallery = gr.Gallery(
                [(item["image"], item["title"]) for item in loras],
                label="LoRA DLC's",
                allow_preview=False,
                columns=3,
                elem_id="gallery",
                show_share_button=False
            )
            with gr.Group():
                custom_lora = gr.Textbox(label="Enter Custom LoRA", placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime")
                gr.Markdown("[Check the list of FLUX LoRA's](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
            custom_lora_info = gr.HTML(visible=False)
            custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
        with gr.Column():
            progress_bar = gr.Markdown(elem_id="progress", visible=False)
            result = gr.Image(label="Generated Image")

    with gr.Row():
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                input_image = gr.Image(label="Input image", type="filepath")
                image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)

                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)

                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)

    gallery.select(
        update_selection,
        inputs=[width, height],
        outputs=[prompt, selected_info, selected_index, width, height]
    )
    custom_lora.input(
        add_custom_lora,
        inputs=[custom_lora],
        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
    )
    custom_lora_button.click(
        remove_custom_lora,
        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
    )
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed, progress_bar]
    )

app.queue()
app.launch()