cngsm committed on
Commit
78fb966
1 Parent(s): d349cee

Upload 9 files

Files changed (9)
  1. README.md +40 -6
  2. app.py +584 -0
  3. custom.png +0 -0
  4. flux_lora.png +0 -0
  5. gitattributes +35 -0
  6. live_preview_helpers.py +166 -0
  7. loras.json +219 -0
  8. prompts.csv +242 -0
  9. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,47 @@
  ---
- title: Lrha
- emoji: 🦀
+ title: FLUX LoRa Lab
+ emoji: 🧪
  colorFrom: purple
- colorTo: gray
+ colorTo: blue
  sdk: gradio
  sdk_version: 5.1.0
  app_file: app.py
- pinned: false
- short_description: flux_lora
+ pinned: true
+ license: mit
+ models:
+ - black-forest-labs/FLUX.1-dev
+ - renderartist/retrocomicflux
+ - glif/l0w-r3z
+ - Purz/vhs-box
+ - renderartist/simplevectorflux
+ - glif/anime-blockprint-style
+ - multimodalart/flux-tarot-v1
+ - alvdansen/pola-photo-flux
+ - dvyio/flux-lora-the-sims
+ - alvdansen/softpasty-flux-dev
+ - dvyio/flux-lora-film-noir
+ - AIWarper/RubberCore1920sCartoonStyle
+ - Norod78/JojosoStyle-flux-lora
+ - XLabs-AI/flux-RealismLora
+ - multimodalart/vintage-ads-flux
+ - glif/how2draw
+ - mgwr/Cine-Aesthetic
+ - sWizad/pokemon-trainer-sprites-pixelart-flux
+ - nerijs/animation2k-flux
+ - alvdansen/softserve_anime
+ - veryVANYA/ps1-style-flux
+ - alvdansen/flux-koda
+ - alvdansen/frosting_lane_flux
+ - davisbro/half_illustration
+ - fofr/flux-wrong
+ - linoyts/yarn_art_Flux_LoRA
+ - Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style
+ - SebastianBodza/Flux_Aquarell_Watercolor_v2
+ - dataautogpt3/FLUX-SyntheticAnime
+ - fofr/flux-80s-cyberpunk
+ - kudzueye/boreal-flux-dev-v2
+ - XLabs-AI/flux-lora-collection
+ - martintomov/retrofuturism-flux
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,584 @@
+ import os
+ import gradio as gr
+ import json
+ import logging
+ import torch
+ from PIL import Image
+ import spaces
+ from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
+ from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+ from diffusers.utils import load_image
+ from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
+ import copy
+ import random
+ import time
+ import requests
+ import pandas as pd
+
+ # Load prompts for randomization
+ df = pd.read_csv('prompts.csv', header=None)
+ prompt_values = df.values.flatten()
+
+ # Load LoRAs from JSON file
+ with open('loras.json', 'r') as f:
+     loras = json.load(f)
+
+ # Initialize the base model
+ dtype = torch.bfloat16
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ base_model = "black-forest-labs/FLUX.1-dev"
+
+ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+ good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
+     base_model,
+     vae=good_vae,
+     transformer=pipe.transformer,
+     text_encoder=pipe.text_encoder,
+     tokenizer=pipe.tokenizer,
+     text_encoder_2=pipe.text_encoder_2,
+     tokenizer_2=pipe.tokenizer_2,
+     torch_dtype=dtype
+ )
+
+ MAX_SEED = 2**32 - 1
+
+ pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
+ class calculateDuration:
+     def __init__(self, activity_name=""):
+         self.activity_name = activity_name
+
+     def __enter__(self):
+         self.start_time = time.time()
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.end_time = time.time()
+         self.elapsed_time = self.end_time - self.start_time
+         if self.activity_name:
+             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+         else:
+             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
+ def download_file(url, directory=None):
+     if directory is None:
+         directory = os.getcwd()  # Use current working directory if not specified
+
+     # Get the filename from the URL
+     filename = url.split('/')[-1]
+
+     # Full path for the downloaded file
+     filepath = os.path.join(directory, filename)
+
+     # Download the file
+     response = requests.get(url)
+     response.raise_for_status()  # Raise an exception for bad status codes
+
+     # Write the content to the file
+     with open(filepath, 'wb') as file:
+         file.write(response.content)
+
+     return filepath
+
+ def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
+     selected_index = evt.index
+     selected_indices = selected_indices or []
+     if selected_index in selected_indices:
+         selected_indices.remove(selected_index)
+     else:
+         if len(selected_indices) < 2:
+             selected_indices.append(selected_index)
+         else:
+             gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
+             return gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), width, height, gr.update(), gr.update()
+
+     selected_info_1 = "Select a LoRA 1"
+     selected_info_2 = "Select a LoRA 2"
+     lora_scale_1 = 1.15
+     lora_scale_2 = 1.15
+     lora_image_1 = None
+     lora_image_2 = None
+     if len(selected_indices) >= 1:
+         lora1 = loras_state[selected_indices[0]]
+         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+         lora_image_1 = lora1['image']
+     if len(selected_indices) >= 2:
+         lora2 = loras_state[selected_indices[1]]
+         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
+         lora_image_2 = lora2['image']
+
+     if selected_indices:
+         last_selected_lora = loras_state[selected_indices[-1]]
+         new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
+     else:
+         new_placeholder = "Type a prompt after selecting a LoRA"
+
+     return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2
+
+ def remove_lora_1(selected_indices, loras_state):
+     if len(selected_indices) >= 1:
+         selected_indices.pop(0)
+     selected_info_1 = "Select a LoRA 1"
+     selected_info_2 = "Select a LoRA 2"
+     lora_scale_1 = 1.15
+     lora_scale_2 = 1.15
+     lora_image_1 = None
+     lora_image_2 = None
+     if len(selected_indices) >= 1:
+         lora1 = loras_state[selected_indices[0]]
+         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
+         lora_image_1 = lora1['image']
+     if len(selected_indices) >= 2:
+         lora2 = loras_state[selected_indices[1]]
+         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
+         lora_image_2 = lora2['image']
+     return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
+
+ def remove_lora_2(selected_indices, loras_state):
+     if len(selected_indices) >= 2:
+         selected_indices.pop(1)
+     selected_info_1 = "Select LoRA 1"
+     selected_info_2 = "Select LoRA 2"
+     lora_scale_1 = 1.15
+     lora_scale_2 = 1.15
+     lora_image_1 = None
+     lora_image_2 = None
+     if len(selected_indices) >= 1:
+         lora1 = loras_state[selected_indices[0]]
+         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
+         lora_image_1 = lora1['image']
+     if len(selected_indices) >= 2:
+         lora2 = loras_state[selected_indices[1]]
+         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
+         lora_image_2 = lora2['image']
+     return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
+
+ def randomize_loras(selected_indices, loras_state):
+     if len(loras_state) < 2:
+         raise gr.Error("Not enough LoRAs to randomize.")
+     selected_indices = random.sample(range(len(loras_state)), 2)
+     lora1 = loras_state[selected_indices[0]]
+     lora2 = loras_state[selected_indices[1]]
+     selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+     selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
+     lora_scale_1 = 1.15
+     lora_scale_2 = 1.15
+     lora_image_1 = lora1['image']
+     lora_image_2 = lora2['image']
+     random_prompt = random.choice(prompt_values)
+     return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
+
+ def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
+     if custom_lora:
+         try:
+             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
+             print(f"Loaded custom LoRA: {repo}")
+             existing_item_index = next((index for (index, item) in enumerate(current_loras) if item['repo'] == repo), None)
+             if existing_item_index is None:
+                 if repo.endswith(".safetensors") and repo.startswith("http"):
+                     repo = download_file(repo)
+                 new_item = {
+                     "image": image if image else "/home/user/app/custom.png",
+                     "title": title,
+                     "repo": repo,
+                     "weights": path,
+                     "trigger_word": trigger_word
+                 }
+                 print(f"New LoRA: {new_item}")
+                 existing_item_index = len(current_loras)
+                 current_loras.append(new_item)
+
+             # Update gallery
+             gallery_items = [(item["image"], item["title"]) for item in current_loras]
+             # Update selected_indices if there's room
+             if len(selected_indices) < 2:
+                 selected_indices.append(existing_item_index)
+             else:
+                 gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
+
+             # Update selected_info and images
+             selected_info_1 = "Select a LoRA 1"
+             selected_info_2 = "Select a LoRA 2"
+             lora_scale_1 = 1.15
+             lora_scale_2 = 1.15
+             lora_image_1 = None
+             lora_image_2 = None
+             if len(selected_indices) >= 1:
+                 lora1 = current_loras[selected_indices[0]]
+                 selected_info_1 = f"### LoRA 1 Selected: {lora1['title']} ✨"
+                 lora_image_1 = lora1['image'] if lora1['image'] else None
+             if len(selected_indices) >= 2:
+                 lora2 = current_loras[selected_indices[1]]
+                 selected_info_2 = f"### LoRA 2 Selected: {lora2['title']} ✨"
+                 lora_image_2 = lora2['image'] if lora2['image'] else None
+             print("Finished adding custom LoRA")
+             return (
+                 current_loras,
+                 gr.update(value=gallery_items),
+                 selected_info_1,
+                 selected_info_2,
+                 selected_indices,
+                 lora_scale_1,
+                 lora_scale_2,
+                 lora_image_1,
+                 lora_image_2
+             )
+         except Exception as e:
+             print(e)
+             gr.Warning(str(e))
+             return current_loras, gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update()
+     else:
+         return current_loras, gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update()
+
+ def remove_custom_lora(selected_indices, current_loras, gallery):
+     if current_loras:
+         custom_lora_repo = current_loras[-1]['repo']
+         # Remove from loras list
+         current_loras = current_loras[:-1]
+         # Remove from selected_indices if selected
+         custom_lora_index = len(current_loras)
+         if custom_lora_index in selected_indices:
+             selected_indices.remove(custom_lora_index)
+         # Update gallery
+         gallery_items = [(item["image"], item["title"]) for item in current_loras]
+         # Update selected_info and images
+         selected_info_1 = "Select a LoRA 1"
+         selected_info_2 = "Select a LoRA 2"
+         lora_scale_1 = 1.15
+         lora_scale_2 = 1.15
+         lora_image_1 = None
+         lora_image_2 = None
+         if len(selected_indices) >= 1:
+             lora1 = current_loras[selected_indices[0]]
+             selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
+             lora_image_1 = lora1['image']
+         if len(selected_indices) >= 2:
+             lora2 = current_loras[selected_indices[1]]
+             selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
+             lora_image_2 = lora2['image']
+     return (
+         current_loras,
+         gr.update(value=gallery_items),
+         selected_info_1,
+         selected_info_2,
+         selected_indices,
+         lora_scale_1,
+         lora_scale_2,
+         lora_image_1,
+         lora_image_2
+     )
+
+ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
+     print("Generating image...")
+     pipe.to("cuda")
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+     with calculateDuration("Generating image"):
+         # Generate image
+         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+             prompt=prompt_mash,
+             num_inference_steps=steps,
+             guidance_scale=cfg_scale,
+             width=width,
+             height=height,
+             generator=generator,
+             joint_attention_kwargs={"scale": 1.0},
+             output_type="pil",
+             good_vae=good_vae,
+         ):
+             yield img
+
+ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
+     pipe_i2i.to("cuda")
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+     image_input = load_image(image_input_path)
+     final_image = pipe_i2i(
+         prompt=prompt_mash,
+         image=image_input,
+         strength=image_strength,
+         num_inference_steps=steps,
+         guidance_scale=cfg_scale,
+         width=width,
+         height=height,
+         generator=generator,
+         joint_attention_kwargs={"scale": 1.0},
+         output_type="pil",
+     ).images[0]
+     return final_image
+
+ @spaces.GPU(duration=75)
+ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
+     if not selected_indices:
+         raise gr.Error("You must select at least one LoRA before proceeding.")
+
+     selected_loras = [loras_state[idx] for idx in selected_indices]
+
+     # Build the prompt with trigger words
+     prepends = []
+     appends = []
+     for lora in selected_loras:
+         trigger_word = lora.get('trigger_word', '')
+         if trigger_word:
+             if lora.get("trigger_position") == "prepend":
+                 prepends.append(trigger_word)
+             else:
+                 appends.append(trigger_word)
+     prompt_mash = " ".join(prepends + [prompt] + appends)
+     print("Prompt Mash: ", prompt_mash)
+     # Unload previous LoRA weights
+     with calculateDuration("Unloading LoRA"):
+         pipe.unload_lora_weights()
+         pipe_i2i.unload_lora_weights()
+
+     print(pipe.get_active_adapters())
+     # Load LoRA weights with respective scales
+     lora_names = []
+     lora_weights = []
+     with calculateDuration("Loading LoRA weights"):
+         for idx, lora in enumerate(selected_loras):
+             lora_name = f"lora_{idx}"
+             lora_names.append(lora_name)
+             print(f"Lora Name: {lora_name}")
+             lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
+             lora_path = lora['repo']
+             weight_name = lora.get("weights")
+             print(f"Lora Path: {lora_path}")
+             pipe_to_use = pipe_i2i if image_input is not None else pipe
+             pipe_to_use.load_lora_weights(
+                 lora_path,
+                 weight_name=weight_name if weight_name else None,
+                 low_cpu_mem_usage=True,
+                 adapter_name=lora_name
+             )
+         print("Loaded LoRAs:", lora_names)
+         print("Adapter weights:", lora_weights)
+         if image_input is not None:
+             pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
+         else:
+             pipe.set_adapters(lora_names, adapter_weights=lora_weights)
+     print(pipe.get_active_adapters())
+     # Set random seed for reproducibility
+     with calculateDuration("Randomizing seed"):
+         if randomize_seed:
+             seed = random.randint(0, MAX_SEED)
+
+     # Generate image
+     if image_input is not None:
+         final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
+         yield final_image, seed, gr.update(visible=False)
+     else:
+         image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
+         # Consume the generator to get the final image
+         final_image = None
+         step_counter = 0
+         for image in image_generator:
+             step_counter += 1
+             final_image = image
+             progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
+             yield image, seed, gr.update(value=progress_bar, visible=True)
+         yield final_image, seed, gr.update(value=progress_bar, visible=False)
+
+ run_lora.zerogpu = True
+
+ def get_huggingface_safetensors(link):
+     split_link = link.split("/")
+     if len(split_link) == 2:
+         model_card = ModelCard.load(link)
+         base_model = model_card.data.get("base_model")
+         print(f"Base model: {base_model}")
+         if base_model not in ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]:
+             raise Exception("Not a FLUX LoRA!")
+         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+         trigger_word = model_card.data.get("instance_prompt", "")
+         image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+         fs = HfFileSystem()
+         safetensors_name = None
+         try:
+             list_of_files = fs.ls(link, detail=False)
+             for file in list_of_files:
+                 if file.endswith(".safetensors"):
+                     safetensors_name = file.split("/")[-1]
+                 if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
+                     image_elements = file.split("/")
+                     image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+         except Exception as e:
+             print(e)
+             raise gr.Error("Invalid Hugging Face repository with a *.safetensors LoRA")
+         if not safetensors_name:
+             raise gr.Error("No *.safetensors file found in the repository")
+         return split_link[1], link, safetensors_name, trigger_word, image_url
+     else:
+         raise gr.Error("Invalid Hugging Face repository link")
+
+ def check_custom_model(link):
+     if link.endswith(".safetensors"):
+         # Treat as direct link to the LoRA weights
+         title = os.path.basename(link)
+         repo = link
+         path = None  # No specific weight name
+         trigger_word = ""
+         image_url = None
+         return title, repo, path, trigger_word, image_url
+     elif link.startswith("https://"):
+         if "huggingface.co" in link:
+             link_split = link.split("huggingface.co/")
+             return get_huggingface_safetensors(link_split[1])
+         else:
+             raise Exception("Unsupported URL")
+     else:
+         # Assume it's a Hugging Face model path
+         return get_huggingface_safetensors(link)
+
+ def update_history(new_image, history):
+     """Updates the history gallery with the new image."""
+     if history is None:
+         history = []
+     history.insert(0, new_image)
+     return history
+
+ css = '''
+ #gen_btn{height: 100%}
+ #title{text-align: center}
+ #title h1{font-size: 3em; display:inline-flex; align-items:center}
+ #title img{width: 100px; margin-right: 0.25em}
+ #gallery .grid-wrap{height: 5vh}
+ #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+ .custom_lora_card{margin-bottom: 1em}
+ .card_internal{display: flex;height: 100px;margin-top: .5em}
+ .card_internal img{margin-right: 1em}
+ .styler{--form-gap-width: 0px !important}
+ #progress{height:30px}
+ #progress .generating{display:none}
+ .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+ .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+ #component-8, .button_total{height: 100%; align-self: stretch;}
+ #loaded_loras [data-testid="block-info"]{font-size:80%}
+ #custom_lora_structure{background: var(--block-background-fill)}
+ #custom_lora_btn{margin-top: auto;margin-bottom: 11px}
+ #random_btn{font-size: 300%}
+ #component-11{align-self: stretch;}
+ '''
+
+ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
+     title = gr.HTML(
+         """<h1><img src="https://i.imgur.com/wMh2Oek.png" alt="LoRA"> LoRA Lab [beta]</h1><br><span style="
+         margin-top: -25px !important;
+         display: block;
+         margin-left: 37px;
+         ">Mix and match any FLUX[dev] LoRAs</span>""",
+         elem_id="title",
+     )
+     loras_state = gr.State(loras)
+     selected_indices = gr.State([])
+     with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
+         with gr.Column(scale=1):
+             generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
+     with gr.Row(elem_id="loaded_loras"):
+         with gr.Column(scale=1, min_width=25):
+             randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
+         with gr.Column(scale=8):
+             with gr.Row():
+                 with gr.Column(scale=0, min_width=50):
+                     lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                 with gr.Column(scale=3, min_width=100):
+                     selected_info_1 = gr.Markdown("Select a LoRA 1")
+                 with gr.Column(scale=5, min_width=50):
+                     lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
+                 with gr.Row():
+                     remove_button_1 = gr.Button("Remove", size="sm")
+         with gr.Column(scale=8):
+             with gr.Row():
+                 with gr.Column(scale=0, min_width=50):
+                     lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                 with gr.Column(scale=3, min_width=100):
+                     selected_info_2 = gr.Markdown("Select a LoRA 2")
+                 with gr.Column(scale=5, min_width=50):
+                     lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
+                 with gr.Row():
+                     remove_button_2 = gr.Button("Remove", size="sm")
+     with gr.Row():
+         with gr.Column():
+             with gr.Group():
+                 with gr.Row(elem_id="custom_lora_structure"):
+                     custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="multimodalart/vintage-ads-flux", scale=3, min_width=150)
+                     add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
+                     remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
+                 gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
+             gallery = gr.Gallery(
+                 [(item["image"], item["title"]) for item in loras],
+                 label="Or pick from the LoRA Explorer gallery",
+                 allow_preview=False,
+                 columns=5,
+                 elem_id="gallery",
+                 show_share_button=False,
+                 interactive=False
+             )
+         with gr.Column():
+             progress_bar = gr.Markdown(elem_id="progress", visible=False)
+             result = gr.Image(label="Generated Image", interactive=False, show_share_button=False)
+             with gr.Accordion("History", open=False):
+                 history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
+
+     with gr.Row():
+         with gr.Accordion("Advanced Settings", open=False):
+             with gr.Row():
+                 input_image = gr.Image(label="Input image", type="filepath", show_share_button=False)
+                 image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+             with gr.Column():
+                 with gr.Row():
+                     cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                     steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
+
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                     height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                 with gr.Row():
+                     randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+
+     gallery.select(
+         update_selection,
+         inputs=[selected_indices, loras_state, width, height],
+         outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2])
+     remove_button_1.click(
+         remove_lora_1,
+         inputs=[selected_indices, loras_state],
+         outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+     )
+     remove_button_2.click(
+         remove_lora_2,
+         inputs=[selected_indices, loras_state],
+         outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+     )
+     randomize_button.click(
+         randomize_loras,
+         inputs=[selected_indices, loras_state],
+         outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
+     )
+     add_custom_lora_button.click(
+         add_custom_lora,
+         inputs=[custom_lora, selected_indices, loras_state, gallery],
+         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+     )
+     remove_custom_lora_button.click(
+         remove_custom_lora,
+         inputs=[selected_indices, loras_state, gallery],
+         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+     )
+     gr.on(
+         triggers=[generate_button.click, prompt.submit],
+         fn=run_lora,
+         inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
+         outputs=[result, seed, progress_bar]
+     ).then(
+         fn=lambda x, history: update_history(x, history),
+         inputs=[result, history_gallery],
+         outputs=history_gallery,
+     )
+
+ app.queue()
+ app.launch()
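
The core of `run_lora` above is the trigger-word handling: each selected LoRA's `trigger_word` is prepended or appended around the user prompt before generation, depending on its optional `trigger_position` field. A minimal standalone sketch of that logic for reference (the helper name `build_prompt_mash` is hypothetical; the app inlines this inside `run_lora`):

```python
# Sketch of the trigger-word composition used by run_lora (hypothetical
# helper name; the Space inlines this logic rather than using a function).
def build_prompt_mash(prompt, selected_loras):
    prepends, appends = [], []
    for lora in selected_loras:
        trigger_word = lora.get("trigger_word", "")
        if trigger_word:
            if lora.get("trigger_position") == "prepend":
                prepends.append(trigger_word)
            else:
                appends.append(trigger_word)
    return " ".join(prepends + [prompt] + appends)

# Example with two records shaped like loras.json entries:
demo_loras = [
    {"trigger_word": "a vintage ad of", "trigger_position": "prepend"},
    {"trigger_word": ", retrofuturism"},
]
print(build_prompt_mash("a robot barista", demo_loras))
# -> "a vintage ad of a robot barista , retrofuturism"
```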
custom.png ADDED
flux_lora.png ADDED
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
live_preview_helpers.py ADDED
@@ -0,0 +1,166 @@
+ import torch
+ import numpy as np
+ from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
+ from typing import Any, Dict, List, Optional, Union
+
+ # Helper functions
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.16,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu
+
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+
+ # FLUX pipeline function
+ @torch.inference_mode()
+ def flux_pipe_call_that_returns_an_iterable_of_images(
+     self,
+     prompt: Union[str, List[str]] = None,
+     prompt_2: Optional[Union[str, List[str]]] = None,
+     height: Optional[int] = None,
+     width: Optional[int] = None,
+     num_inference_steps: int = 28,
+     timesteps: List[int] = None,
+     guidance_scale: float = 3.5,
+     num_images_per_prompt: Optional[int] = 1,
+     generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+     latents: Optional[torch.FloatTensor] = None,
+     prompt_embeds: Optional[torch.FloatTensor] = None,
+     pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+     output_type: Optional[str] = "pil",
+     return_dict: bool = True,
+     joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+     max_sequence_length: int = 512,
+     good_vae: Optional[Any] = None,
+ ):
+     height = height or self.default_sample_size * self.vae_scale_factor
+     width = width or self.default_sample_size * self.vae_scale_factor
+
+     # 1. Check inputs
+     self.check_inputs(
+         prompt,
+         prompt_2,
+         height,
+         width,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         max_sequence_length=max_sequence_length,
+     )
+
+     self._guidance_scale = guidance_scale
+     self._joint_attention_kwargs = joint_attention_kwargs
+     self._interrupt = False
+
+     # 2. Define call parameters
+     batch_size = 1 if isinstance(prompt, str) else len(prompt)
+     device = self._execution_device
+
+     # 3. Encode prompt
+     lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+     prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+         prompt=prompt,
+         prompt_2=prompt_2,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         device=device,
+         num_images_per_prompt=num_images_per_prompt,
+         max_sequence_length=max_sequence_length,
+         lora_scale=lora_scale,
+     )
+     # 4. Prepare latent variables
+     num_channels_latents = self.transformer.config.in_channels // 4
+     latents, latent_image_ids = self.prepare_latents(
+         batch_size * num_images_per_prompt,
+         num_channels_latents,
+         height,
+         width,
+         prompt_embeds.dtype,
+         device,
+         generator,
+         latents,
+     )
+     # 5. Prepare timesteps
+     sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+     image_seq_len = latents.shape[1]
+     mu = calculate_shift(
+         image_seq_len,
+         self.scheduler.config.base_image_seq_len,
+         self.scheduler.config.max_image_seq_len,
+         self.scheduler.config.base_shift,
+         self.scheduler.config.max_shift,
+     )
+     timesteps, num_inference_steps = retrieve_timesteps(
+         self.scheduler,
+         num_inference_steps,
+         device,
+         timesteps,
+         sigmas,
+         mu=mu,
+     )
+     self._num_timesteps = len(timesteps)
+
+     # Handle guidance
+     guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
+     # 6. Denoising loop
+     for i, t in enumerate(timesteps):
+         if self.interrupt:
+             continue
+
+         timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+         noise_pred = self.transformer(
+             hidden_states=latents,
+             timestep=timestep / 1000,
+             guidance=guidance,
+             pooled_projections=pooled_prompt_embeds,
+             encoder_hidden_states=prompt_embeds,
+             txt_ids=text_ids,
+             img_ids=latent_image_ids,
+             joint_attention_kwargs=self.joint_attention_kwargs,
+             return_dict=False,
+         )[0]
+         # Yield intermediate result
+         latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+         latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+         image = self.vae.decode(latents_for_image, return_dict=False)[0]
+         yield self.image_processor.postprocess(image, output_type=output_type)[0]
+         latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+         torch.cuda.empty_cache()
+
+     # Final image using good_vae
+     latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+     latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
+     image = good_vae.decode(latents, return_dict=False)[0]
+     self.maybe_free_model_hooks()
+     torch.cuda.empty_cache()
+     yield self.image_processor.postprocess(image, output_type=output_type)[0]
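
A quick worked example of `calculate_shift` above: it linearly interpolates the timestep shift `mu` between `base_shift` and `max_shift` as the packed latent sequence length grows from `base_seq_len` to `max_seq_len`. The numbers below use only the defaults declared in this file (the pipeline call itself passes the scheduler's config values instead):

```python
# Worked example with this file's defaults:
# base_seq_len=256, max_seq_len=4096, base_shift=0.5, max_shift=1.16.
from live_preview_helpers import calculate_shift

# A 1024x1024 FLUX image packs into (1024 / 16) ** 2 = 4096 latent tokens,
# so mu reaches max_shift; a 512x512 image packs into 1024 tokens.
print(calculate_shift(4096))  # 1.16
print(calculate_shift(1024))  # ~0.632
```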
loras.json ADDED
@@ -0,0 +1,219 @@
1
+ [
2
+ {
3
+ "image": "https://huggingface.co/renderartist/retrocomicflux/resolve/main/images/ComfyUI_temp_ipugi_00131_.png",
4
+ "repo": "renderartist/retrocomicflux",
5
+ "trigger_word": "c0m1c style vintage 1930s style comic strip panel of",
6
+ "title": "Retro Comic",
7
+ "trigger_position": "prepend"
8
+ },
9
+ {
10
+ "image": "https://huggingface.co/glif/l0w-r3z/resolve/main/images/a19d658b-5d4c-45bc-9df6-f2bec54462a5.png",
11
+ "repo": "glif/l0w-r3z",
12
+ "trigger_word": ", l0w-r3z",
13
+ "title": "Low Res 3D"
14
+ },
15
+ {
16
+ "repo": "Purz/vhs-box",
17
+ "image": "https://huggingface.co/Purz/vhs-box/resolve/main/33726559.jpeg",
18
+ "trigger_word": ", vhs_box",
19
+ "title": "VHS Box"
20
+ },
21
+ {
22
+ "image": "https://huggingface.co/renderartist/simplevectorflux/resolve/main/images/ComfyUI_09477_.jpeg",
23
+ "title": "Simple Vector",
24
+ "repo": "renderartist/simplevectorflux",
25
+ "trigger_word": "v3ct0r style, simple flat vector art, isolated on white bg,",
26
+ "trigger_position": "prepend"
27
+ },
28
+ {
29
+ "image": "https://huggingface.co/glif/how2draw/resolve/main/images/glif-how2draw-araminta-k-vbnvy94npt8m338r2vm02m50.jpg",
30
+ "repo": "glif/how2draw",
31
+ "trigger_word": ", How2Draw",
32
+ "title": "How2Draw"
33
+ },
34
+ {
35
+ "image": "https://huggingface.co/glif/anime-blockprint-style/resolve/main/images/glif-block-print-anime-flux-dev-araminta-k-lora-araminta-k-kbde06qyovrmvsv65ubfyhn1.jpg",
36
+ "repo": "glif/anime-blockprint-style",
37
+ "trigger_word": ", blockprint style",
38
+ "title": "Blockprint Style"
39
+ },
40
+ {
41
+ "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
42
+ "title": "Tarot v1",
43
+ "repo": "multimodalart/flux-tarot-v1",
44
+ "trigger_word": "in the style of TOK a trtcrd, tarot style",
45
+ "aspect": "portrait"
46
+ },
47
+ {
48
+ "repo": "alvdansen/pola-photo-flux",
49
+ "image": "https://huggingface.co/alvdansen/pola-photo-flux/resolve/main/images/out-2%20(83).webp",
50
+ "trigger_word": ", polaroid style",
51
+ "title": "Polaroid Style"
52
+ },
53
+ {
54
+ "image": "https://huggingface.co/dvyio/flux-lora-the-sims/resolve/main/images/dunBAVBsALOepaE_dsWFI_6b0fef6b0fc4472aa07d00edea7c75b3.jpg",
55
+ "repo": "dvyio/flux-lora-the-sims",
56
+ "trigger_word": ", video game screenshot in the style of THSMS",
57
+ "title": "The Sims style"
58
+ },
59
+ {
60
+ "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
61
+ "title": "SoftPasty",
62
+ "repo": "alvdansen/softpasty-flux-dev",
63
+ "trigger_word": "araminta_illus illustration style"
64
+ },
65
+ {
66
+ "image": "https://huggingface.co/dvyio/flux-lora-film-noir/resolve/main/images/S8iWMa0GamEcFkanHHmI8_a232d8b83bb043808742d661dac257f7.jpg",
67
+ "title": "Film Noir",
68
+ "repo": "dvyio/flux-lora-film-noir",
69
+ "trigger_word": "in the style of FLMNR"
70
+ },
71
+ {
72
+ "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
73
+ "title": "1920s cartoon",
74
+ "repo": "AIWarper/RubberCore1920sCartoonStyle",
75
+ "trigger_word": "RU883R style",
76
+ "trigger_position": "prepend"
77
+ },
78
+ {
79
+ "image": "https://huggingface.co/Norod78/JojosoStyle-flux-lora/resolve/main/samples/1725244218477__000004255_1.jpg",
80
+ "title": "JoJo Style",
81
+ "repo": "Norod78/JojosoStyle-flux-lora",
82
+ "trigger_word": "JojosoStyle",
83
+ "trigger_position": "prepend"
84
+ },
85
+ {
86
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
87
+ "title": "flux-Realism",
88
+ "repo": "XLabs-AI/flux-RealismLora",
89
+ "trigger_word": ""
90
+ },
91
+ {
92
+ "image": "https://huggingface.co/multimodalart/vintage-ads-flux/resolve/main/samples/j_XNU6Oe0mgttyvf9uPb3_dc244dd3d6c246b4aff8351444868d66.png",
93
+ "title": "Vintage Ads",
94
+ "repo":"multimodalart/vintage-ads-flux",
95
+ "trigger_word": "a vintage ad of",
96
+ "trigger_position": "prepend"
97
+ },
98
+ {
99
+ "image": "https://huggingface.co/mgwr/Cine-Aesthetic/resolve/main/images/00030-1333633802.png",
100
+ "title": "Cine Aesthetic",
101
+ "repo": "mgwr/Cine-Aesthetic",
102
+ "trigger_word": "mgwr/cine",
103
+ "trigger_position": "prepend"
104
+ },
105
+ {
106
+ "image": "https://huggingface.co/sWizad/pokemon-trainer-sprites-pixelart-flux/resolve/main/26578915.jpeg",
107
+ "repo": "sWizad/pokemon-trainer-sprites-pixelart-flux",
108
+ "title": "Pokemon Trainer Sprites",
109
+ "trigger_word": "white background, a pixel image of",
110
+ "trigger_position": "prepend"
111
+ },
112
+ {
113
+ "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
114
+ "title": "animation2k",
115
+ "repo": "nerijs/animation2k-flux",
116
+ "trigger_word": ""
117
+ },
118
+ {
119
+ "image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
120
+ "title":"SoftServe Anime",
121
+ "repo": "alvdansen/softserve_anime",
122
+ "trigger_word": ""
123
+ },
124
+ {
125
+ "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
126
+ "title": "PS1 style",
127
+ "repo": "veryVANYA/ps1-style-flux",
128
+ "trigger_word": "ps1 game screenshot,",
129
+ "trigger_position": "prepend"
130
+ },
131
+ {
132
+ "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
133
+ "title": "flux koda",
134
+ "repo": "alvdansen/flux-koda",
135
+ "trigger_word": "flmft style"
136
+ },
137
+ {
138
+ "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
139
+ "title": "Frosting Lane Flux",
140
+ "repo": "alvdansen/frosting_lane_flux",
141
+ "trigger_word": ""
142
+ },
143
+ {
144
+ "image": "https://huggingface.co/davisbro/half_illustration/resolve/main/images/example3.webp",
145
+ "title": "Half Illustration",
146
+ "repo": "davisbro/half_illustration",
147
+ "trigger_word": "in the style of TOK"
148
+ },
149
+ {
150
+ "image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
151
+ "title":"wrong",
152
+ "repo": "fofr/flux-wrong",
153
+ "trigger_word": "WRNG"
154
+ },
155
+ {
156
+ "image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
157
+ "title":"Yarn Art",
158
+ "repo": "linoyts/yarn_art_Flux_LoRA",
159
+ "trigger_word": ", yarn art style"
160
+ },
161
+ {
162
+ "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
163
+ "title": "Paper Cutout",
164
+ "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
165
+ "trigger_word": ", Paper Cutout Style"
166
+ },
167
+ {
168
+ "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
169
+ "title": "Aquarell Watercolor",
170
+ "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
171
+ "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
172
+ },
173
+ {
174
+ "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
175
+ "title": "SyntheticAnime",
176
+ "repo": "dataautogpt3/FLUX-SyntheticAnime",
177
+ "trigger_word": "1980s anime screengrab, VHS quality"
178
+ },
179
+ {
180
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
181
+ "title": "flux-anime",
182
+ "repo": "XLabs-AI/flux-lora-collection",
183
+ "weights": "anime_lora.safetensors",
184
+ "trigger_word": ", anime"
185
+ },
186
+ {
187
+ "image": "https://replicate.delivery/yhqm/QD8Ioy5NExqSCtBS8hG04XIRQZFaC9pxJemINT1bibyjZfSTA/out-0.webp",
188
+ "title": "80s Cyberpunk",
189
+ "repo": "fofr/flux-80s-cyberpunk",
190
+ "trigger_word": "style of 80s cyberpunk",
191
+ "trigger_position": "prepend"
192
+ },
193
+ {
194
+ "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
195
+ "title": "Boreal",
196
+ "repo": "kudzueye/boreal-flux-dev-v2",
197
+ "trigger_word": "phone photo"
198
+ },
199
+ {
200
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
201
+ "title": "flux-disney",
202
+ "repo": "XLabs-AI/flux-lora-collection",
203
+ "weights": "disney_lora.safetensors",
204
+ "trigger_word": ", disney style"
205
+ },
206
+ {
207
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
208
+ "title": "flux-art",
209
+ "repo": "XLabs-AI/flux-lora-collection",
210
+ "weights": "art_lora.safetensors",
211
+ "trigger_word": ", art"
212
+ },
213
+ {
214
+ "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
215
+ "title": "Retrofuturism Flux",
216
+ "repo": "martintomov/retrofuturism-flux",
217
+ "trigger_word": ", retrofuturism"
218
+ }
219
+ ]
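
Each record above follows the shape `app.py` expects: `image`, `title`, and `repo` are always present, while `weights`, `trigger_word`, and `trigger_position` are optional. A small validation sketch (not part of the Space) that mirrors how the app reads these fields:

```python
# Sanity-check loras.json entries the same way app.py consumes them.
import json

with open("loras.json") as f:
    loras = json.load(f)

# app.py builds the gallery from (image, title) pairs.
gallery_items = [(item["image"], item["title"]) for item in loras]

for item in loras:
    assert {"image", "title", "repo"} <= item.keys(), item
    item.get("weights")            # optional .safetensors filename inside the repo
    item.get("trigger_word", "")   # appended to the prompt by default
    item.get("trigger_position")   # "prepend" moves the trigger before the prompt

print(f"{len(loras)} LoRAs loaded, e.g. {gallery_items[0][1]}")
```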
prompts.csv ADDED
@@ -0,0 +1,242 @@
1
+ intelligence
2
+ "A raccoon wearing formal clothes, wearing a tophat and holding a cane. The raccoon is holding a garbage bag. Oil painting in the style of traditional Chinese painting."
3
+ a wood cabin
4
+ A tornado made of sharks crashing into a skyscraper. painting in the style of abstract cubism.
5
+ a beach with apartment buildings next to it
6
+ a cat coming through a cat door
7
+ an extreme close-up view of a capybara sitting in a field
8
+ a red cube on top of a blue cube
9
+ weeds in the cracks of a sidewalk
10
+ a smiling man with wavy brown hair and trimmed beard
11
+ A photo of an Athenian vase with a painting of pangolins playing tennis in the style of Egyptian hieroglyphics
12
+ a crowd of people watching fireworks by a park
13
+ a book with the words 'Don't Panic!' written on it
14
+ Portrait of a gecko wearing a train conductor’s hat and holding a flag that has a yin-yang symbol on it. Woodcut.
15
+ a violin next to an apple
16
+ a smiling man
17
+ a cricketer standing next to a wicket
18
+ a thumbnail image of an ice cream cone
19
+ a hamster dragon
20
+ "an old raccoon wearing a top hat and holding an apple, oil painting in the style of van gogh"
21
+ a traffic jam at Times Square
22
+ "a traffic jam at Times Square, with a cat on top of each car"
23
+ a glass of orange juice to the right of a plate with buttered toast on it
24
+ a clock tower
25
+ "Aerial view of downtown Manhattan, but with Millennium Wheel next to the Statue of Liberty. The Great Pyramid is on a sandy island near the buildings."
26
+ a pig in a field
27
+ "the door of knowing, a portal brightly opening the way through darkness. abstract anime landscape oil painting."
28
+ a sticker stuck in the middle of a stop sign
29
+ a cat jumping down from a wall
30
+ a man reading a book with a prism on its cover
31
+ "A DSLR photo of a shiny VW van that has a cityscape painted on it. A smiling sloth stands on grass in front of the van and is wearing a leather jacket, a cowboy hat, a kilt and a bowtie. The sloth is holding a quarterstaff and a big book."
32
+ a wall
33
+ A punk rock frog in a studded leather jacket shouting into a microphone while standing on a boulder
34
+ a red sports car on the road
35
+ A red bus is driving on the road
36
+ A photo of an Athenian vase with a painting of toucans playing tennis in the style of Egyptian hieroglyphics
37
+ a moose by a mountain stream
38
+ a coloring book page of a horse next to a stream
39
+ a coffee mug with an ankh symbol on it
40
+ three green glass bottles
41
+ a blue airplane taxiing on a runway with the sun behind it
42
+ the skyline of New York City
43
+ a present
44
+ a car with no windows
45
+ A tourist is looking at a whale using a binocular
46
+ a white plastic bench with a high arched back
47
+ element
48
+ food
49
+ a book cover
50
+ a series of musical notes
51
+ a large white yacht
52
+ a horse running in a field
53
+ "a statue of Abraham Lincoln wearing an opaque and shiny astronaut's helmet. The statue sits on the moon, with the planet Earth in the sky."
54
+ a volcano erupting near a small town
55
+ an ostrich
56
+ the finale of a fireworks display
57
+ Portrait of a tiger wearing a train conductor's hat and holding a skateboard that has a yin-yang symbol on it. Chinese ink and wash painting
58
+ a giant gorilla at the top of the Empire State Building
59
+ a drawing of the skyline of New York City
60
+ alien history
61
+ a red fire hydrant by a brick wall
62
+ an inflatable rabbit held up in the air by the geyser Old Faithful
63
+ A large city fountain that has milk instead of water. Several cats are leaning into the fountain.
64
+ a mixed media image with a photograph of a woman with long orange hair over a background that is a sketch of a city skyline
65
+ a cat patting a crystal ball with the number 7 written on it in black marker
66
+ Ha Long Bay
67
+ a comic about a friendly car in the city
68
+ a chimpanzee sitting on a wooden bench
69
+ space
70
+ A giraffe walking through a green grass covered field
71
+ "A set of 2x2 emoji icons with happy, angry, surprised and sobbing faces. The emoji icons look like pigs. All of the pigs are wearing crowns."
72
+ a person with arms like a tree branch
73
+ A small house in the wilderness
74
+ a fox
75
+ "Oil painting of a giant robot made of sushi, holding chopsticks."
76
+ a windmill
77
+ "The saying ""BE EXCELLENT TO EACH OTHER"" written in a stained glass window."
78
+ a watermelon chair
79
+ a drawing of a peaceful lakeside landscape
80
+ a thumbnail image of a person skiing
81
+ a nerdy bear wearing glasses and a bowtie
82
+ artificial intelligence
83
+ "a nerdy bear wearing glasses and a bowtie, realistic"
84
+ a green clock in the shape of a pentagon
85
+ a yellow box to the right of a blue sphere
86
+ two chairs
87
+ a woman looking at a house
88
+ an espresso machine
89
+ The trophy doesn't fit into the brown suitcase because it's too small
90
+ a volcano with lava pouring down its slopes
91
+ a laughing woman
92
+ a drawing of a pig face with an eye patch
93
+ a clock with no hands
94
+ a silver fire hydrant next to a sidewalk
95
+ a Tyrannosaurus Rex roaring in front of a palm tree
96
+ A bowl of Pho served with bean sprouts on top
97
+ a pick-up truck
98
+ "A raccoon wearing formal clothes, wearing a tophat and holding a cane. The raccoon is holding a garbage bag. Oil painting in the style of Rembrandt."
99
+ a girl diving into a pool
100
+ a monarch butterfly hatching from its chrysalis
101
+ a ceiling fan with four white blades
102
+ chair
103
+ A portrait of a metal statue of a pharaoh wearing steampunk glasses and a leather jacket over a white t-shirt that has a drawing of a space shuttle on it.
104
+ a horse reading a newspaper
105
+ A photo of a panda made of water.
106
+ three pickup trucks piled on top of each other
107
+ people packed on a double-decker bus
108
+ the grand canyon on a cloudy day
109
+ a pair of headphones on a pumpkin
110
+ A photo of a palm tree made of water.
111
+ view of a clock tower from below
112
+ "a robot painted as graffiti on a brick wall. a sidewalk is in front of the wall, and grass is growing out of cracks in the concrete."
113
+ G I G G L E painted in thick colorful lettering as graffiti on a faded red brick wall with a splotch of exploding white paint.
114
+ Sunset over the sea
115
+ a tiger in a forest
116
+ the city of London on Mars
117
+ the moon with a smiling face
118
+ a sword slicing through pouring milk
119
+ a boy and a tiger
120
+ a comic about a boy and a tiger
121
+ fairy cottage with smoke coming up chimney and a squirrel looking from the window
122
+ a flower with large red petals growing on the moon's surface
123
+ a woman using a sledgehammer to smash an ice sculpture of a goose
124
+ a kangaroo jumping through the park
125
+ a lovestruck cup of boba
126
+ a tennis court with tennis balls scattered all over it
127
+ a family
128
+ a man pouring milk into a coffee cup to make a latte with a beatiful design
129
+ a field with ten massive modern windmills
130
+ a cartoon of a cow jumping over the moon
131
+ the flag of the United Kingdom painted in rusty corrugated iron
132
+ a mouse sitting next to a computer mouse
133
+ a white cat with black ears and markings
134
+ "Two cups of coffee, one with latte art of yin yang symbol. The other has latte art of a heart."
135
+ matching socks with cute cats on them
136
+ "Renaissance portrayals of the Virgin Mary, seated in a loggia. Behind her is a hazy and seemingly isolated landscape imagined by the artist and painted using sfumato."
137
+ a coffee table with a magazine on it
138
+ A heart made of cookie
139
+ a penguin standing on a sidewalk
140
+ "bismuth crystals, intricate fractal pattern"
141
+ "chaotic attractor, multicolored neon"
142
+ "fractal heart pattern, pink blue and white"
143
+ chaotic system interpretation of polyamory
144
+ high dimensional topology of latent spaces
145
+ 300 movie titles
146
+ a man with puppet that looks like a king
147
+ a group of skiers are preparing to walk up a sand dune
148
+ a blue t-shirt with a dinosaur on it
149
+ a king salmon
150
+ an Egyptian statue in the desert
151
+ a moose standing over a fox
152
+ A bowl of soup that looks like a monster spray-painted on a wall
153
+ a shih-tzu dog
154
+ a pirate ship
155
+ early bird and night owl
156
+ a taxi
157
+ the eyes of an owl
158
+ graffiti of a rocket ship on a brick wall
159
+ a musical note
160
+ a small airplane flying over rolling hills
161
+ a tiny football in front of three yellow tennis balls
162
+ a cute illustration of a horned owl with a graduation cap and diploma
163
+ a teddy bear to the right of a toy car
164
+ a map of Australia
165
+ an eagle
166
+ a roast turkey on the table
167
+ a store front
168
+ a map of Manhattan
169
+ a portrait of a postal worker who has forgotten their mailbag
170
+ The Statue of Liberty with the Manhattan skyline in the background.
171
+ a bottle of beer next to an ashtray with a half-smoked cigarrette
172
+ a monarch butterfly
173
+ a large blue box
174
+ a car with tires that have yellow rims
175
+ A funny Rube Goldberg machine made out of metal
176
+ a photograph of a fiddle next to a basketball on a ping pong table
177
+ A bowl of Chicken Pho
178
+ view of a giraffe and a zebra in the middle of a field
179
+ a sunken submarine at the bottom of the ocean
180
+ two wine bottles
181
+ a turtle upside down and spinning on its shell
182
+ a painting of a fox in the style of starry night
183
+ a poodle wearing a baseball cap holding a dictionary in hand and writing bonez on a chalkboard
184
+ a stained glass window of a panda eating bamboo
185
+ a blue cow is standing next to a tree with red leaves and yellow fruit. the cow is standing in a field with white flowers. impressionistic painting.
186
+ "an ornate, high-backed mahogany chair with a red cushion"
187
+ the hands of a single person holding a basketball
188
+ a snail
189
+ a man with puppet
190
+ a pickup truck with a horse on its left and two dogs on its right
191
+ an airplane flying into a cloud that looks like monster
192
+ 7 dogs sitting around a poker table
193
+ a racoon detective using a microscope while riding in a train
194
+ a kids' book cover with an illustration of white dog driving a red pickup truck
195
+ a woman with long hair next to a luminescent bird
196
+ a boat
197
+ a high-quality oil painting of a psychedelic hamster dragon
198
+ age of empires
199
+ Downtown Austin at sunrise. detailed ink wash.
200
+ a pile of cash on a stone floor
201
+ A helicopter flies over the Arches National Park.
202
+ the word 'START' written on a street surface
203
+ a tennis court with three yellow cones on it
204
+ "A rabbit checks its watch, and so does a gecko."
205
+ a close-up of a bloody mary cocktail
206
+ a view of the Kremlin on a sunny day
207
+ The Oriental Pearl in oil painting
208
+ Tibetan priests ringing a bell
209
+ Portrait of a tiger wearing a train conductor's hat and holding a skateboard that has a yin-yang symbol on it. charcoal sketch
210
+ a cat sitting in a car seat
211
+ a selfie of an old man with a white beard
212
+ "A green sign that says ""Very Deep Learning"" and is at the edge of the Grand Canyon."
213
+ an octopus
214
+ a zebra
215
+ "a warrior, golden army"
216
+ a triangle with a smiling face
217
+ "Pop art, bold colors, mass culture, commercial techniques, screen printing, repetition, consumerism imagery, iconic portraits, flat imagery, irony, everyday objects"
218
+ "Minimalist, conceptual, geometric, bold, abstract, systematic, wall drawings, instructions-based, modular forms, repetitive patterns, color-blocks, line variations, innovative, influential"
219
+ "Conceptual, mixed-media, appropriation, text-and-image, irony, photo-collage, bold colors, visual narrative, humorous, minimalist, innovative, montage, semiotics, critique, pop culture, fragmented, juxtaposition"
220
+ "Feminist, avant-garde, body art, performance art, video art, provocative, conceptual, radical, multimedia, body politics, identity exploration, societal critique"
221
+ "In the style of Rembrandt, this portrait on canvas captures a man seated in a dimly lit space, his gaze introspective and laden with experience. His clothing is of a rich, textured fabric, suggesting a life of complexity and depth. The sparse background emphasizes his solitary figure, highlighting the contemplative mood and the dignified simplicity of his demeanor. The overall atmosphere is one of introspection and timeless wisdom."
222
+ "East-West fusion, trans-cultural, dream-like, nature-inspired, ethereal, organic forms, delicate colors, figurative-abstract blend, poetic, intimate, evolving, mystical, feminine perspective"
223
+ "17th century, Rembrandt Harmensz van Rijn, Baroque art, oil on canvas, half-body portrait, solitary woman, neutral expression, indoor scene, warm lighting, detailed drapery, velvet dress, gold embroidery, pearl necklace, lace collar, ornate jewelry, realistic skin tones, fine hair details, soft shadows, meticulous brushwork, classical pose, timeless elegance, artistic mastery, cultural significance, visual narrative."
224
+ "Rembrandt Harmensz van Rijn, 17th century Dutch Golden Age, oil on canvas, half-body portrait, female figure, neutral facial expression, elaborate Renaissance attire, rich velvet dress with intricate embroidery, lace collar, pearl necklace, ornate headdress adorned with jewels, seated pose, hands resting on a table, dark background with soft lighting, realistic style, fine brushwork, attention to detail in textures and fabrics, historical context, cultural significance."
225
+ "Rembrandt Harmensz. van Rijn, oil on canvas, captures a distinguished gentleman in a moment of quiet reflection. He is attired in a sumptuous black velvet cloak with a high white collar, the textures rendered with precise detail to showcase the contrast between the soft velvet and crisp linen. The subtle play of light illuminates his contemplative expression and highlights the gentle grip of his hand on a leather-bound journal, hinting at a life of intellectual pursuits. The background is a soft interplay of shadows, enhancing the intimate and introspective mood of the portrait."
226
+ "Minimalist, large-scale, industrial materials, site-specific, immersive, abstract, sculptural, steel-focused, gravity-based, experiential, curved forms, monumental, spatial exploration, environmental interaction, tactile"
227
+ "Minimalist, conceptual, postmodern, geometric abstraction, color-block, multi-disciplinary, playful irony, spatial intervention, typographic, avant-garde, video art, performance elements"
228
+ "Abstract, minimalism, expressionism, bold colors, geometric shapes, large-scale works, mixed media, architectural elements, contemporary, post-modern, layered textures"
229
+ "Environmental art, large-scale installations, fabric-wrapped structures, public spaces, temporary works, conceptual, immersive experiences, land art, transformative, outdoor"
230
+ "found-object assemblage, Eat Art, Nouveau Réalisme, tableaux traps, everyday objects, collage, three-dimensional, playful, ironic, conceptual, kinetic, interactive, mixed media"
231
+ a submarine floating past a shark
232
+ "Photomontage, feminist, critical, conceptual, socio-political, collage, video art, performance, domestic sphere, anti-war, satire, urban life, activist, multimedia, appropriation"
233
+ "Abstract expressionism, scribbles, calligraphy, graffiti, child-like, spontaneous, emotional, textured, layered, historical references, poetic, minimal color, gestural, large-scale canvases"
234
+ "Contemporary, provocative, conceptual, mixed-media, death-themed, colorful, controversial, pharmaceutical motifs, formaldehyde specimens, spot paintings, spin art, luxury, critique of consumerism, installation art."
235
+ a room
236
+ "minimalism, color fields, abstract, geometric shapes, bold simplicity, hard edges, monochromes, vibrant, non-figurative, spatial relationships, innovative, pure forms"
237
+ "Photographic narrative, African-American experience, critical race exploration, feminism, cultural identity, social justice, mixed media, evocative, poignant, reflective, storytelling, human condition"
238
+ a Harley-Davidson motorcycle with a flame decal
239
+ an ornate jewel-encrusted key
240
+ a woman running on a trail
241
+ a giraffe with a funny face
242
+ square blue apples on a tree with circular yellow leaves
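
The file above is a single-column, headerless CSV; `app.py` loads it with `pd.read_csv('prompts.csv', header=None)`, flattens it, and draws one random row whenever the 🎲 button swaps in two random LoRAs. A dependency-free equivalent, shown only to document the format:

```python
# Minimal reader for prompts.csv (same behavior as the pandas path in app.py).
import csv
import random

with open("prompts.csv", newline="") as f:
    prompt_values = [row[0] for row in csv.reader(f) if row]

print(random.choice(prompt_values))
```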
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ git+https://github.com/huggingface/diffusers.git
+ git+https://github.com/huggingface/transformers.git
+ git+https://github.com/huggingface/accelerate.git
+ safetensors
+ sentencepiece
+ git+https://github.com/huggingface/peft.git
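
Because diffusers, transformers, accelerate, and peft are pinned to their git main branches rather than released versions, a quick import check after installation can be useful. This snippet is an optional sanity check and not part of the Space:

```python
# Print the resolved versions of the pinned dependencies; exact numbers will
# vary since several packages are installed from their git main branches.
import diffusers, transformers, accelerate, peft, safetensors, sentencepiece

for mod in (diffusers, transformers, accelerate, peft, safetensors, sentencepiece):
    print(mod.__name__, getattr(mod, "__version__", "unknown"))
```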