ACCC1380 committed on
Commit
6e5ade7
1 Parent(s): 1d4d039

Upload lora-scripts/sd-scripts/train_textual_inversion_XTI.py with huggingface_hub

lora-scripts/sd-scripts/train_textual_inversion_XTI.py ADDED
@@ -0,0 +1,720 @@
import importlib
import argparse
import math
import os
import toml
from multiprocessing import Value

from tqdm import tqdm

import torch
from library import deepspeed_utils
from library.device_utils import init_ipex, clean_memory_on_device

init_ipex()

from accelerate.utils import set_seed
import diffusers
from diffusers import DDPMScheduler
import library

import library.train_util as train_util
import library.huggingface_util as huggingface_util
import library.config_util as config_util
from library.config_util import (
    ConfigSanitizer,
    BlueprintGenerator,
)
import library.custom_train_functions as custom_train_functions
from library.custom_train_functions import (
    apply_snr_weight,
    prepare_scheduler_for_custom_training,
    pyramid_noise_like,
    apply_noise_offset,
    scale_v_prediction_loss_like_noise_prediction,
    apply_debiased_estimation,
    apply_masked_loss,
)
import library.original_unet as original_unet
from XTI_hijack import unet_forward_XTI, downblock_forward_XTI, upblock_forward_XTI
from library.utils import setup_logging, add_logging_arguments

setup_logging()
import logging

logger = logging.getLogger(__name__)

imagenet_templates_small = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
    "a painting in the style of {}",
    "a rendering in the style of {}",
    "a cropped painting in the style of {}",
    "the painting in the style of {}",
    "a clean painting in the style of {}",
    "a dirty painting in the style of {}",
    "a dark painting in the style of {}",
    "a picture in the style of {}",
    "a cool painting in the style of {}",
    "a close-up painting in the style of {}",
    "a bright painting in the style of {}",
    "a cropped painting in the style of {}",
    "a good painting in the style of {}",
    "a close-up painting in the style of {}",
    "a rendition in the style of {}",
    "a nice painting in the style of {}",
    "a small painting in the style of {}",
    "a weird painting in the style of {}",
    "a large painting in the style of {}",
]


def train(args):
    if args.output_name is None:
        args.output_name = args.token_string
    use_template = args.use_object_template or args.use_style_template
    setup_logging(args, reset=True)

    train_util.verify_training_args(args)
    train_util.prepare_dataset_args(args, True)

    if args.sample_every_n_steps is not None or args.sample_every_n_epochs is not None:
        logger.warning(
            "sample_every_n_steps and sample_every_n_epochs are not supported in this script currently / sample_every_n_stepsとsample_every_n_epochsは現在このスクリプトではサポートされていません"
        )
    assert (
        args.dataset_class is None
    ), "dataset_class is not supported in this script currently / dataset_classは現在このスクリプトではサポートされていません"

    cache_latents = args.cache_latents

    if args.seed is not None:
        set_seed(args.seed)

    tokenizer = train_util.load_tokenizer(args)

    # prepare the accelerator
    logger.info("prepare accelerator")
    accelerator = train_util.prepare_accelerator(args)

    # prepare dtypes for mixed precision and cast as needed
    weight_dtype, save_dtype = train_util.prepare_dtype(args)

    # load the model
    text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)

    # Convert the init_word to token_id
    if args.init_word is not None:
        init_token_ids = tokenizer.encode(args.init_word, add_special_tokens=False)
        if len(init_token_ids) > 1 and len(init_token_ids) != args.num_vectors_per_token:
            logger.warning(
                f"token length for init words is not same to num_vectors_per_token, init words is repeated or truncated / 初期化単語のトークン長がnum_vectors_per_tokenと合わないため、繰り返しまたは切り捨てが発生します: length {len(init_token_ids)}"
            )
    else:
        init_token_ids = None

    # add new word to tokenizer, count is num_vectors_per_token
    token_strings = [args.token_string] + [f"{args.token_string}{i+1}" for i in range(args.num_vectors_per_token - 1)]
    num_added_tokens = tokenizer.add_tokens(token_strings)
    assert (
        num_added_tokens == args.num_vectors_per_token
    ), f"tokenizer has same word to token string. please use another one / 指定したargs.token_stringは既に存在します。別の単語を使ってください: {args.token_string}"

    token_ids = tokenizer.convert_tokens_to_ids(token_strings)
    logger.info(f"tokens are added: {token_ids}")
    assert min(token_ids) == token_ids[0] and token_ids[-1] == token_ids[0] + len(token_ids) - 1, f"token ids is not ordered"
    assert len(tokenizer) - 1 == token_ids[-1], f"token ids is not end of tokenize: {len(tokenizer)}"

    token_strings_XTI = []
    XTI_layers = [
        "IN01",
        "IN02",
        "IN04",
        "IN05",
        "IN07",
        "IN08",
        "MID",
        "OUT03",
        "OUT04",
        "OUT05",
        "OUT06",
        "OUT07",
        "OUT08",
        "OUT09",
        "OUT10",
        "OUT11",
    ]
    for layer_name in XTI_layers:
        token_strings_XTI += [f"{t}_{layer_name}" for t in token_strings]

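    # XTI (Extended Textual Inversion, "P+") learns a separate embedding for each of the 16
    # cross-attention layers listed above, so every placeholder token gets one layer-suffixed
    # copy per layer (e.g. "token_IN01", ..., "token_OUT11"); the hijacked U-Net forward
    # imported from XTI_hijack can then feed each layer its own conditioning vector.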
    tokenizer.add_tokens(token_strings_XTI)
    token_ids_XTI = tokenizer.convert_tokens_to_ids(token_strings_XTI)
    logger.info(f"tokens are added (XTI): {token_ids_XTI}")
    # Resize the token embeddings as we are adding new special tokens to the tokenizer
    text_encoder.resize_token_embeddings(len(tokenizer))

    # Initialise the newly added placeholder token with the embeddings of the initializer token
    token_embeds = text_encoder.get_input_embeddings().weight.data
    if init_token_ids is not None:
        for i, token_id in enumerate(token_ids_XTI):
            token_embeds[token_id] = token_embeds[init_token_ids[(i // 16) % len(init_token_ids)]]
            # logger.info(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())

    # load weights
    if args.weights is not None:
        embeddings = load_weights(args.weights)
        assert len(token_ids) == len(
            embeddings
        ), f"num_vectors_per_token is mismatch for weights / 指定した重みとnum_vectors_per_tokenの値が異なります: {len(embeddings)}"
        # logger.info(token_ids, embeddings.size())
        for token_id, embedding in zip(token_ids_XTI, embeddings):
            token_embeds[token_id] = embedding
            # logger.info(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())
        logger.info(f"weights loaded")

    logger.info(f"create embeddings for {args.num_vectors_per_token} tokens, for {args.token_string}")

    # prepare the dataset
    blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, args.masked_loss, False))
    if args.dataset_config is not None:
        logger.info(f"Load dataset config from {args.dataset_config}")
        user_config = config_util.load_user_config(args.dataset_config)
        ignored = ["train_data_dir", "reg_data_dir", "in_json"]
        if any(getattr(args, attr) is not None for attr in ignored):
            logger.info(
                "ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
                    ", ".join(ignored)
                )
            )
    else:
        use_dreambooth_method = args.in_json is None
        if use_dreambooth_method:
            logger.info("Use DreamBooth method.")
            user_config = {
                "datasets": [
                    {"subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(args.train_data_dir, args.reg_data_dir)}
                ]
            }
        else:
            logger.info("Train with captions.")
            user_config = {
                "datasets": [
                    {
                        "subsets": [
                            {
                                "image_dir": args.train_data_dir,
                                "metadata_file": args.in_json,
                            }
                        ]
                    }
                ]
            }

    blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
    train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    train_dataset_group.enable_XTI(XTI_layers, token_strings=token_strings)
    current_epoch = Value("i", 0)
    current_step = Value("i", 0)
    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

    # make captions: very crude implementation that rewrites the caption to the string "tokenstring tokenstring1 tokenstring2 ... tokenstringN"
    if use_template:
        logger.info(f"use template for training captions. is object: {args.use_object_template}")
        templates = imagenet_templates_small if args.use_object_template else imagenet_style_templates_small
        replace_to = " ".join(token_strings)
        captions = []
        for tmpl in templates:
            captions.append(tmpl.format(replace_to))
        train_dataset_group.add_replacement("", captions)

        if args.num_vectors_per_token > 1:
            prompt_replacement = (args.token_string, replace_to)
        else:
            prompt_replacement = None
    else:
        if args.num_vectors_per_token > 1:
            replace_to = " ".join(token_strings)
            train_dataset_group.add_replacement(args.token_string, replace_to)
            prompt_replacement = (args.token_string, replace_to)
        else:
            prompt_replacement = None

    if args.debug_dataset:
        train_util.debug_dataset(train_dataset_group, show_input_ids=True)
        return
    if len(train_dataset_group) == 0:
        logger.error("No data found. Please verify arguments / 画像がありません。引数指定を確認してください")
        return

    if cache_latents:
        assert (
            train_dataset_group.is_latent_cacheable()
        ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"

    # incorporate xformers or memory efficient attention into the model
    train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)
    original_unet.UNet2DConditionModel.forward = unet_forward_XTI
    original_unet.CrossAttnDownBlock2D.forward = downblock_forward_XTI
    original_unet.CrossAttnUpBlock2D.forward = upblock_forward_XTI

    # prepare for training
    if cache_latents:
        vae.to(accelerator.device, dtype=weight_dtype)
        vae.requires_grad_(False)
        vae.eval()
        with torch.no_grad():
            train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
        vae.to("cpu")
        clean_memory_on_device(accelerator.device)

        accelerator.wait_for_everyone()

    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
        text_encoder.gradient_checkpointing_enable()

    # prepare the classes needed for training
    logger.info("prepare optimizer, data loader etc.")
    trainable_params = text_encoder.get_input_embeddings().parameters()
    _, _, optimizer = train_util.get_optimizer(args, trainable_params)

    # prepare the dataloader
    # note: persistent_workers cannot be used when the number of DataLoader processes is 0
    n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # cpu_count or max_data_loader_n_workers
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group,
        batch_size=1,
        shuffle=True,
        collate_fn=collator,
        num_workers=n_workers,
        persistent_workers=args.persistent_data_loader_workers,
    )

    # calculate the number of training steps
    if args.max_train_epochs is not None:
        args.max_train_steps = args.max_train_epochs * math.ceil(
            len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
        )
        logger.info(
            f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
        )

    # also send the number of training steps to the dataset side
    train_dataset_group.set_max_train_steps(args.max_train_steps)

    # prepare the lr scheduler
    lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)

    # accelerator takes care of the rest
    text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        text_encoder, optimizer, train_dataloader, lr_scheduler
    )

    index_no_updates = torch.arange(len(tokenizer)) < token_ids_XTI[0]
    # logger.info(len(index_no_updates), torch.sum(index_no_updates))
    orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.detach().clone()
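    # rows below the first XTI token id belong to the original vocabulary; a copy of those
    # embeddings is kept so they can be restored after every optimizer step, leaving only
    # the newly added token embeddings trainable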

    # Freeze all parameters except for the token embeddings in text encoder
    text_encoder.requires_grad_(True)
    text_encoder.text_model.encoder.requires_grad_(False)
    text_encoder.text_model.final_layer_norm.requires_grad_(False)
    text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
    # text_encoder.text_model.embeddings.token_embedding.requires_grad_(True)

    unet.requires_grad_(False)
    unet.to(accelerator.device, dtype=weight_dtype)
    if args.gradient_checkpointing:  # according to TI example in Diffusers, train is required
        unet.train()
    else:
        unet.eval()

    if not cache_latents:
        vae.requires_grad_(False)
        vae.eval()
        vae.to(accelerator.device, dtype=weight_dtype)

    # experimental feature: fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
    if args.full_fp16:
        train_util.patch_accelerator_for_fp16_training(accelerator)
        text_encoder.to(weight_dtype)

    # resume training
    train_util.resume_from_local_or_hf_if_specified(accelerator, args)

    # calculate the number of epochs
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
        args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1

    # start training
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("running training / 学習開始")
    logger.info(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
    logger.info(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
    logger.info(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
    logger.info(f" num epochs / epoch数: {num_train_epochs}")
    logger.info(f" batch size per device / バッチサイズ: {args.train_batch_size}")
    logger.info(
        f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}"
    )
    logger.info(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
    logger.info(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")

    progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
    global_step = 0

    noise_scheduler = DDPMScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
    )
    prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
    if args.zero_terminal_snr:
        custom_train_functions.fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)

    if accelerator.is_main_process:
        init_kwargs = {}
        if args.wandb_run_name:
            init_kwargs["wandb"] = {"name": args.wandb_run_name}
        if args.log_tracker_config is not None:
            init_kwargs = toml.load(args.log_tracker_config)
        accelerator.init_trackers(
            "textual_inversion" if args.log_tracker_name is None else args.log_tracker_name, init_kwargs=init_kwargs
        )

    # function for saving/removing
    def save_model(ckpt_name, embs, steps, epoch_no, force_sync_upload=False):
        os.makedirs(args.output_dir, exist_ok=True)
        ckpt_file = os.path.join(args.output_dir, ckpt_name)

        logger.info("")
        logger.info(f"saving checkpoint: {ckpt_file}")
        save_weights(ckpt_file, embs, save_dtype)
        if args.huggingface_repo_id is not None:
            huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)

    def remove_model(old_ckpt_name):
        old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
        if os.path.exists(old_ckpt_file):
            logger.info(f"removing old checkpoint: {old_ckpt_file}")
            os.remove(old_ckpt_file)

    # training loop
    for epoch in range(num_train_epochs):
        logger.info("")
        logger.info(f"epoch {epoch+1}/{num_train_epochs}")
        current_epoch.value = epoch + 1

        text_encoder.train()

        loss_total = 0

        for step, batch in enumerate(train_dataloader):
            current_step.value = global_step
            with accelerator.accumulate(text_encoder):
                with torch.no_grad():
                    if "latents" in batch and batch["latents"] is not None:
                        latents = batch["latents"].to(accelerator.device).to(dtype=weight_dtype)
                    else:
                        # convert images to latents
                        latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
                        latents = latents * 0.18215
                    b_size = latents.shape[0]

                # Get the text embedding for conditioning
                input_ids = batch["input_ids"].to(accelerator.device)
                # weight_dtype) use float instead of fp16/bf16 because text encoder is float
                encoder_hidden_states = torch.stack(
                    [
                        train_util.get_hidden_states(args, s, tokenizer, text_encoder, weight_dtype)
                        for s in torch.split(input_ids, 1, dim=1)
                    ]
                )

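                # with enable_XTI, the dataset appears to supply one tokenized caption per XTI layer,
                # so the stack above yields per-layer hidden states that the hijacked U-Net forward
                # dispatches to the corresponding cross-attention blocks
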
                # Sample noise, sample a random timestep for each image, and add noise to the latents,
                # with noise offset and/or multires noise if specified
                noise, noisy_latents, timesteps, huber_c = train_util.get_noise_noisy_latents_and_timesteps(args, noise_scheduler, latents)

                # Predict the noise residual
                with accelerator.autocast():
                    noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states).sample

                if args.v_parameterization:
                    # v-parameterization training
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    target = noise

                loss = train_util.conditional_loss(noise_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c)
                if args.masked_loss:
                    loss = apply_masked_loss(loss, batch)
                loss = loss.mean([1, 2, 3])

                loss_weights = batch["loss_weights"]  # weight for each sample

                loss = loss * loss_weights
                if args.min_snr_gamma:
                    loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma, args.v_parameterization)
                if args.scale_v_pred_loss_like_noise_pred:
                    loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
                if args.debiased_estimation_loss:
                    loss = apply_debiased_estimation(loss, timesteps, noise_scheduler)

                loss = loss.mean()  # it is a mean, so no need to divide by batch_size

                accelerator.backward(loss)
                if accelerator.sync_gradients and args.max_grad_norm != 0.0:
                    params_to_clip = text_encoder.get_input_embeddings().parameters()
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

                # Let's make sure we don't update any embedding weights besides the newly added token
                with torch.no_grad():
                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = orig_embeds_params[
                        index_no_updates
                    ]

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                # TODO: fix sample_images
                # train_util.sample_images(
                #     accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet, prompt_replacement
                # )

                # save the model at the specified steps
                if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
                    accelerator.wait_for_everyone()
                    if accelerator.is_main_process:
                        updated_embs = (
                            accelerator.unwrap_model(text_encoder)
                            .get_input_embeddings()
                            .weight[token_ids_XTI]
                            .data.detach()
                            .clone()
                        )

                        ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
                        save_model(ckpt_name, updated_embs, global_step, epoch)

                        if args.save_state:
                            train_util.save_and_remove_state_stepwise(args, accelerator, global_step)

                        remove_step_no = train_util.get_remove_step_no(args, global_step)
                        if remove_step_no is not None:
                            remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
                            remove_model(remove_ckpt_name)

            current_loss = loss.detach().item()
            if args.logging_dir is not None:
                logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
                if (
                    args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower()
                ):  # tracking d*lr value
                    logs["lr/d*lr"] = (
                        lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                    )
                accelerator.log(logs, step=global_step)

            loss_total += current_loss
            avr_loss = loss_total / (step + 1)
            logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        if args.logging_dir is not None:
            logs = {"loss/epoch": loss_total / len(train_dataloader)}
            accelerator.log(logs, step=epoch + 1)

        accelerator.wait_for_everyone()

        updated_embs = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[token_ids_XTI].data.detach().clone()

        if args.save_every_n_epochs is not None:
            saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
            if accelerator.is_main_process and saving:
                ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
                save_model(ckpt_name, updated_embs, epoch + 1, global_step)

                remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
                if remove_epoch_no is not None:
                    remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
                    remove_model(remove_ckpt_name)

                if args.save_state:
                    train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)

        # TODO: fix sample_images
        # train_util.sample_images(
        #     accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet, prompt_replacement
        # )

        # end of epoch

    is_main_process = accelerator.is_main_process
    if is_main_process:
        text_encoder = accelerator.unwrap_model(text_encoder)

    accelerator.end_training()

    if is_main_process and (args.save_state or args.save_state_on_train_end):
        train_util.save_state_on_train_end(args, accelerator)

    updated_embs = text_encoder.get_input_embeddings().weight[token_ids_XTI].data.detach().clone()

    del accelerator  # delete this because memory is needed after this point

    if is_main_process:
        ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
        save_model(ckpt_name, updated_embs, global_step, num_train_epochs, force_sync_upload=True)

        logger.info("model saved.")


def save_weights(file, updated_embs, save_dtype):
    updated_embs = updated_embs.reshape(16, -1, updated_embs.shape[-1])
    updated_embs = updated_embs.chunk(16)
    XTI_layers = [
        "IN01",
        "IN02",
        "IN04",
        "IN05",
        "IN07",
        "IN08",
        "MID",
        "OUT03",
        "OUT04",
        "OUT05",
        "OUT06",
        "OUT07",
        "OUT08",
        "OUT09",
        "OUT10",
        "OUT11",
    ]
    state_dict = {}
    for i, layer_name in enumerate(XTI_layers):
        state_dict[layer_name] = updated_embs[i].squeeze(0).detach().clone().to("cpu").to(save_dtype)

    # if save_dtype is not None:
    #     for key in list(state_dict.keys()):
    #         v = state_dict[key]
    #         v = v.detach().clone().to("cpu").to(save_dtype)
    #         state_dict[key] = v

    if os.path.splitext(file)[1] == ".safetensors":
        from safetensors.torch import save_file

        save_file(state_dict, file)
    else:
        torch.save(state_dict, file)  # can be loaded in Web UI


def load_weights(file):
    if os.path.splitext(file)[1] == ".safetensors":
        from safetensors.torch import load_file

        data = load_file(file)
    else:
        raise ValueError(f"NOT XTI: {file}")

    if len(data.values()) != 16:
        raise ValueError(f"NOT XTI: {file}")

    emb = torch.concat([x for x in data.values()])

    return emb


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()

    add_logging_arguments(parser)
    train_util.add_sd_models_arguments(parser)
    train_util.add_dataset_arguments(parser, True, True, False)
    train_util.add_training_arguments(parser, True)
    train_util.add_masked_loss_arguments(parser)
    deepspeed_utils.add_deepspeed_arguments(parser)
    train_util.add_optimizer_arguments(parser)
    config_util.add_config_arguments(parser)
    custom_train_functions.add_custom_train_arguments(parser, False)

    parser.add_argument(
        "--save_model_as",
        type=str,
        default="pt",
        choices=[None, "ckpt", "pt", "safetensors"],
        help="format to save the model (default is .pt) / モデル保存時の形式(デフォルトはpt)",
    )

    parser.add_argument(
        "--weights", type=str, default=None, help="embedding weights to initialize / 学習するネットワークの初期重み"
    )
    parser.add_argument(
        "--num_vectors_per_token", type=int, default=1, help="number of vectors per token / トークンに割り当てるembeddingsの要素数"
    )
    parser.add_argument(
        "--token_string",
        type=str,
        default=None,
        help="token string used in training, must not exist in tokenizer / 学習時に使用されるトークン文字列、tokenizerに存在しない文字であること",
    )
    parser.add_argument(
        "--init_word", type=str, default=None, help="words to initialize vector / ベクトルを初期化に使用する単語、複数可"
    )
    parser.add_argument(
        "--use_object_template",
        action="store_true",
        help="ignore caption and use default templates for object / キャプションは使わずデフォルトの物体用テンプレートで学習する",
    )
    parser.add_argument(
        "--use_style_template",
        action="store_true",
        help="ignore caption and use default templates for style / キャプションは使わずデフォルトのスタイル用テンプレートで学習する",
    )

    return parser


if __name__ == "__main__":
    parser = setup_parser()

    args = parser.parse_args()
    train_util.verify_command_line_training_args(args)
    args = train_util.read_config_from_file(args, parser)

    train(args)
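
For reference, a minimal sketch of how an embedding file written by save_weights above could be inspected; the output path is a placeholder, and this assumes the safetensors package is installed:

import torch
from safetensors.torch import load_file

# hypothetical output path from a training run of this script
emb_path = "output/my_token.safetensors"

state_dict = load_file(emb_path)  # one entry per XTI layer ("IN01" ... "OUT11")
for layer_name, emb in state_dict.items():
    # each value holds num_vectors_per_token embedding vectors for that U-Net layer
    print(layer_name, tuple(emb.shape), emb.dtype)

# load_weights above concatenates the 16 per-layer tensors back into a single tensor
print(torch.concat(list(state_dict.values())).shape)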