ACCC1380 committed on
Commit
234f55f
1 Parent(s): e677f34

Upload lora-scripts/sd-scripts/train_network.py with huggingface_hub

lora-scripts/sd-scripts/train_network.py ADDED
@@ -0,0 +1,1117 @@
+ import importlib
+ import argparse
+ import math
+ import os
+ import sys
+ import random
+ import time
+ import json
+ from multiprocessing import Value
+ import toml
+
+ from tqdm import tqdm
+
+ import torch
+ from library.device_utils import init_ipex, clean_memory_on_device
+
+ init_ipex()
+
+ from accelerate.utils import set_seed
+ from diffusers import DDPMScheduler
+ from library import deepspeed_utils, model_util
+
+ import library.train_util as train_util
+ from library.train_util import DreamBoothDataset
+ import library.config_util as config_util
+ from library.config_util import (
+     ConfigSanitizer,
+     BlueprintGenerator,
+ )
+ import library.huggingface_util as huggingface_util
+ import library.custom_train_functions as custom_train_functions
+ from library.custom_train_functions import (
+     apply_snr_weight,
+     get_weighted_text_embeddings,
+     prepare_scheduler_for_custom_training,
+     scale_v_prediction_loss_like_noise_prediction,
+     add_v_prediction_like_loss,
+     apply_debiased_estimation,
+     apply_masked_loss,
+ )
+ from library.utils import setup_logging, add_logging_arguments
+
+ setup_logging()
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class NetworkTrainer:
+     def __init__(self):
+         self.vae_scale_factor = 0.18215
+         self.is_sdxl = False
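+         # NOTE: 0.18215 is the latent scaling factor of the SD 1.x/2.x VAE;
+         # SDXL trainers are expected to override these attributes in a subclass.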
+
+     # TODO: commonize this with the other scripts
+     def generate_step_logs(
+         self, args: argparse.Namespace, current_loss, avr_loss, lr_scheduler, keys_scaled=None, mean_norm=None, maximum_norm=None
+     ):
+         logs = {"loss/current": current_loss, "loss/average": avr_loss}
+
+         if keys_scaled is not None:
+             logs["max_norm/keys_scaled"] = keys_scaled
+             logs["max_norm/average_key_norm"] = mean_norm
+             logs["max_norm/max_key_norm"] = maximum_norm
+
+         lrs = lr_scheduler.get_last_lr()
+
+         if args.network_train_text_encoder_only or len(lrs) <= 2:  # not block lr (or single block)
+             if args.network_train_unet_only:
+                 logs["lr/unet"] = float(lrs[0])
+             elif args.network_train_text_encoder_only:
+                 logs["lr/textencoder"] = float(lrs[0])
+             else:
+                 logs["lr/textencoder"] = float(lrs[0])
+                 logs["lr/unet"] = float(lrs[-1])  # may be the same as textencoder
+
+             if (
+                 args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower()
+             ):  # tracking d*lr value of unet.
+                 logs["lr/d*lr"] = (
+                     lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
+                 )
+         else:
+             idx = 0
+             if not args.network_train_unet_only:
+                 logs["lr/textencoder"] = float(lrs[0])
+                 idx = 1
+
+             for i in range(idx, len(lrs)):
+                 logs[f"lr/group{i}"] = float(lrs[i])
+                 if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():
+                     logs[f"lr/d*lr/group{i}"] = (
+                         lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
+                     )
+
+         return logs
+
+     def assert_extra_args(self, args, train_dataset_group):
+         pass
+
+     def load_target_model(self, args, weight_dtype, accelerator):
+         text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)
+         return model_util.get_model_version_str_for_sd1_sd2(args.v2, args.v_parameterization), text_encoder, vae, unet
+
+     def load_tokenizer(self, args):
+         tokenizer = train_util.load_tokenizer(args)
+         return tokenizer
+
+     def is_text_encoder_outputs_cached(self, args):
+         return False
+
+     def is_train_text_encoder(self, args):
+         return not args.network_train_unet_only and not self.is_text_encoder_outputs_cached(args)
+
+     def cache_text_encoder_outputs_if_needed(
+         self, args, accelerator, unet, vae, tokenizers, text_encoders, data_loader, weight_dtype
+     ):
+         for t_enc in text_encoders:
+             t_enc.to(accelerator.device, dtype=weight_dtype)
+
+     def get_text_cond(self, args, accelerator, batch, tokenizers, text_encoders, weight_dtype):
+         input_ids = batch["input_ids"].to(accelerator.device)
+         encoder_hidden_states = train_util.get_hidden_states(args, input_ids, tokenizers[0], text_encoders[0], weight_dtype)
+         return encoder_hidden_states
+
+     def call_unet(self, args, accelerator, unet, noisy_latents, timesteps, text_conds, batch, weight_dtype):
+         noise_pred = unet(noisy_latents, timesteps, text_conds).sample
+         return noise_pred
+
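+     # The network's gradients are synchronized across processes manually (the training
+     # loop calls this when accelerator.sync_gradients is set, see "sync DDP grad manually"
+     # below), averaging each parameter's gradient with accelerator.reduce.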
+     def all_reduce_network(self, accelerator, network):
+         for param in network.parameters():
+             if param.grad is not None:
+                 param.grad = accelerator.reduce(param.grad, reduction="mean")
+
+     def sample_images(self, accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet):
+         train_util.sample_images(accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet)
+
+     def train(self, args):
+         session_id = random.randint(0, 2**32)
+         training_started_at = time.time()
+         train_util.verify_training_args(args)
+         train_util.prepare_dataset_args(args, True)
+         deepspeed_utils.prepare_deepspeed_args(args)
+         setup_logging(args, reset=True)
+
+         cache_latents = args.cache_latents
+         use_dreambooth_method = args.in_json is None
+         use_user_config = args.dataset_config is not None
+
+         if args.seed is None:
+             args.seed = random.randint(0, 2**32)
+         set_seed(args.seed)
+
+         # tokenizer may be a single tokenizer or a list; tokenizers is always a list (kept for compatibility with existing code)
+         tokenizer = self.load_tokenizer(args)
+         tokenizers = tokenizer if isinstance(tokenizer, list) else [tokenizer]
+
+         # prepare the dataset
+         if args.dataset_class is None:
+             blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, args.masked_loss, True))
+             if use_user_config:
+                 logger.info(f"Loading dataset config from {args.dataset_config}")
+                 user_config = config_util.load_user_config(args.dataset_config)
+                 ignored = ["train_data_dir", "reg_data_dir", "in_json"]
+                 if any(getattr(args, attr) is not None for attr in ignored):
+                     logger.warning(
+                         "ignoring the following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
+                             ", ".join(ignored)
+                         )
+                     )
+             else:
+                 if use_dreambooth_method:
+                     logger.info("Using DreamBooth method.")
+                     user_config = {
+                         "datasets": [
+                             {
+                                 "subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(
+                                     args.train_data_dir, args.reg_data_dir
+                                 )
+                             }
+                         ]
+                     }
+                 else:
+                     logger.info("Training with captions.")
+                     user_config = {
+                         "datasets": [
+                             {
+                                 "subsets": [
+                                     {
+                                         "image_dir": args.train_data_dir,
+                                         "metadata_file": args.in_json,
+                                     }
+                                 ]
+                             }
+                         ]
+                     }
+
+             blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
+             train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
+         else:
+             # use arbitrary dataset class
+             train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer)
+
+         current_epoch = Value("i", 0)
+         current_step = Value("i", 0)
+         ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+         collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
+
+         if args.debug_dataset:
+             train_util.debug_dataset(train_dataset_group)
+             return
+         if len(train_dataset_group) == 0:
+             logger.error(
+                 "No data found. Please verify arguments (train_data_dir must be the parent of folders with images) / 画像がありません。引数指定を確認してください(train_data_dirには画像があるフォルダではなく、画像があるフォルダの親フォルダを指定する必要があります)"
+             )
+             return
+
+         if cache_latents:
+             assert (
+                 train_dataset_group.is_latent_cacheable()
+             ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+
+         self.assert_extra_args(args, train_dataset_group)
+
+         # prepare the accelerator
+         logger.info("preparing accelerator")
+         accelerator = train_util.prepare_accelerator(args)
+         is_main_process = accelerator.is_main_process
+
+         # prepare dtypes matching the mixed-precision setting, casting as needed
+         weight_dtype, save_dtype = train_util.prepare_dtype(args)
+         vae_dtype = torch.float32 if args.no_half_vae else weight_dtype
+
+         # load the models
+         model_version, text_encoder, vae, unet = self.load_target_model(args, weight_dtype, accelerator)
+
+         # text_encoder is List[CLIPTextModel] or CLIPTextModel
+         text_encoders = text_encoder if isinstance(text_encoder, list) else [text_encoder]
+
+         # incorporate xformers or memory-efficient attention into the model
+         train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)
+         if torch.__version__ >= "2.0.0":  # usable with xformers builds for PyTorch 2.0.0 or later
+             vae.set_use_memory_efficient_attention_xformers(args.xformers)
+
+         # load the network module for incremental (delta) training
+         sys.path.append(os.path.dirname(__file__))
+         accelerator.print("import network module:", args.network_module)
+         network_module = importlib.import_module(args.network_module)
+
+         if args.base_weights is not None:
+             # if base_weights is specified, load those weights and merge them into the model
+             for i, weight_path in enumerate(args.base_weights):
+                 if args.base_weights_multiplier is None or len(args.base_weights_multiplier) <= i:
+                     multiplier = 1.0
+                 else:
+                     multiplier = args.base_weights_multiplier[i]
+
+                 accelerator.print(f"merging module: {weight_path} with multiplier {multiplier}")
+
+                 module, weights_sd = network_module.create_network_from_weights(
+                     multiplier, weight_path, vae, text_encoder, unet, for_inference=True
+                 )
+                 module.merge_to(text_encoder, unet, weights_sd, weight_dtype, accelerator.device if args.lowram else "cpu")
+
+             accelerator.print(f"all weights merged: {', '.join(args.base_weights)}")
+
+         # prepare for training
+         if cache_latents:
+             vae.to(accelerator.device, dtype=vae_dtype)
+             vae.requires_grad_(False)
+             vae.eval()
+             with torch.no_grad():
+                 train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
+             vae.to("cpu")
+             clean_memory_on_device(accelerator.device)
+
+             accelerator.wait_for_everyone()
+
+         # cache text encoder outputs if needed: Text Encoder is moved to cpu or gpu
+         self.cache_text_encoder_outputs_if_needed(
+             args, accelerator, unet, vae, tokenizers, text_encoders, train_dataset_group, weight_dtype
+         )
+
+         # prepare network
+         net_kwargs = {}
+         if args.network_args is not None:
+             for net_arg in args.network_args:
+                 key, value = net_arg.split("=")
+                 net_kwargs[key] = value
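+         # For example, --network_args "conv_dim=4" "algo=lora" yields
+         # net_kwargs = {"conv_dim": "4", "algo": "lora"}; values stay strings and are
+         # parsed by the network module itself.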
+
+         # if a new network is added in future, add if ~ then blocks for each network (;'∀')
+         if args.dim_from_weights:
+             network, _ = network_module.create_network_from_weights(1, args.network_weights, vae, text_encoder, unet, **net_kwargs)
+         else:
+             if "dropout" not in net_kwargs:
+                 # workaround for LyCORIS (;^ω^)
+                 net_kwargs["dropout"] = args.network_dropout
+
+             network = network_module.create_network(
+                 1.0,
+                 args.network_dim,
+                 args.network_alpha,
+                 vae,
+                 text_encoder,
+                 unet,
+                 neuron_dropout=args.network_dropout,
+                 **net_kwargs,
+             )
+         if network is None:
+             return
+         network_has_multiplier = hasattr(network, "set_multiplier")
+
+         if hasattr(network, "prepare_network"):
+             network.prepare_network(args)
+         if args.scale_weight_norms and not hasattr(network, "apply_max_norm_regularization"):
+             logger.warning(
+                 "warning: scale_weight_norms is specified but the network does not support it / scale_weight_normsが指定されていますが、ネットワークが対応していません"
+             )
+             args.scale_weight_norms = False
+
+         train_unet = not args.network_train_text_encoder_only
+         train_text_encoder = self.is_train_text_encoder(args)
+         network.apply_to(text_encoder, unet, train_text_encoder, train_unet)
+
+         if args.network_weights is not None:
+             info = network.load_weights(args.network_weights)
+             accelerator.print(f"load network weights from {args.network_weights}: {info}")
+
+         if args.gradient_checkpointing:
+             unet.enable_gradient_checkpointing()
+             for t_enc in text_encoders:
+                 t_enc.gradient_checkpointing_enable()
+             del t_enc
+             network.enable_gradient_checkpointing()  # may have no effect
+
+         # prepare the classes needed for training
+         accelerator.print("prepare optimizer, data loader etc.")
+
+         # keep backward compatibility
+         try:
+             trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr, args.learning_rate)
+         except TypeError:
+             accelerator.print(
+                 "Deprecated: use prepare_optimizer_params(text_encoder_lr, unet_lr, learning_rate) instead of prepare_optimizer_params(text_encoder_lr, unet_lr)"
+             )
+             trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
+
+         optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, trainable_params)
+
+         # prepare the DataLoader
+         # note: a DataLoader with num_workers=0 cannot use persistent_workers
+         n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # cpu_count or max_data_loader_n_workers
+
+         train_dataloader = torch.utils.data.DataLoader(
+             train_dataset_group,
+             batch_size=1,
+             shuffle=True,
+             collate_fn=collator,
+             num_workers=n_workers,
+             persistent_workers=args.persistent_data_loader_workers,
+         )
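+         # Note: batch_size=1 is intentional here; each dataset item is already a full
+         # (bucketed) batch of train_batch_size samples, which the collator unwraps.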
+
+         # calculate the number of training steps
+         if args.max_train_epochs is not None:
+             args.max_train_steps = args.max_train_epochs * math.ceil(
+                 len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
+             )
+             accelerator.print(
+                 f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
+             )
+
+         # send the number of training steps to the dataset side as well
+         train_dataset_group.set_max_train_steps(args.max_train_steps)
+
+         # prepare the lr scheduler
+         lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
+
+         # experimental feature: fp16/bf16 training including gradients; cast the whole model to fp16/bf16
+         if args.full_fp16:
+             assert (
+                 args.mixed_precision == "fp16"
+             ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
+             accelerator.print("enable full fp16 training.")
+             network.to(weight_dtype)
+         elif args.full_bf16:
+             assert (
+                 args.mixed_precision == "bf16"
+             ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。"
+             accelerator.print("enable full bf16 training.")
+             network.to(weight_dtype)
+
+         unet_weight_dtype = te_weight_dtype = weight_dtype
+         # Experimental Feature: Put base model into fp8 to save vram
+         if args.fp8_base:
+             assert torch.__version__ >= "2.1.0", "fp8_base requires torch>=2.1.0 / fp8を使う場合はtorch>=2.1.0が必要です。"
+             assert (
+                 args.mixed_precision != "no"
+             ), "fp8_base requires mixed precision='fp16' or 'bf16' / fp8を使う場合はmixed_precision='fp16'または'bf16'が必要です。"
+             accelerator.print("enable fp8 training.")
+             unet_weight_dtype = torch.float8_e4m3fn
+             te_weight_dtype = torch.float8_e4m3fn
+
+         unet.requires_grad_(False)
+         unet.to(dtype=unet_weight_dtype)
+         for t_enc in text_encoders:
+             t_enc.requires_grad_(False)
+
+             # in case of cpu, dtype is already set to fp32 because cpu does not support fp8/fp16/bf16
+             if t_enc.device.type != "cpu":
+                 t_enc.to(dtype=te_weight_dtype)
+                 # nn.Embedding does not support FP8
+                 t_enc.text_model.embeddings.to(dtype=(weight_dtype if te_weight_dtype != weight_dtype else te_weight_dtype))
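+                 # i.e. when the rest of the text encoder is cast to fp8, the embedding
+                 # table is kept in the mixed-precision dtype (fp16/bf16).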
+
+         # accelerator takes care of device placement and distributed wrapping
+         if args.deepspeed:
+             ds_model = deepspeed_utils.prepare_deepspeed_model(
+                 args,
+                 unet=unet if train_unet else None,
+                 text_encoder1=text_encoders[0] if train_text_encoder else None,
+                 text_encoder2=text_encoders[1] if train_text_encoder and len(text_encoders) > 1 else None,
+                 network=network,
+             )
+             ds_model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+                 ds_model, optimizer, train_dataloader, lr_scheduler
+             )
+             training_model = ds_model
+         else:
+             if train_unet:
+                 unet = accelerator.prepare(unet)
+             else:
+                 unet.to(accelerator.device, dtype=unet_weight_dtype)  # move to device because unet is not prepared by accelerator
+             if train_text_encoder:
+                 if len(text_encoders) > 1:
+                     text_encoder = text_encoders = [accelerator.prepare(t_enc) for t_enc in text_encoders]
+                 else:
+                     text_encoder = accelerator.prepare(text_encoder)
+                     text_encoders = [text_encoder]
+             else:
+                 pass  # if text_encoder is not trained, no need to prepare. and device and dtype are already set
+
+             network, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+                 network, optimizer, train_dataloader, lr_scheduler
+             )
+             training_model = network
+
+         if args.gradient_checkpointing:
+             # according to TI example in Diffusers, train is required
+             unet.train()
+             for t_enc in text_encoders:
+                 t_enc.train()
+
+                 # set requires_grad = True on the top-level parameters so gradient checkpointing works
+                 if train_text_encoder:
+                     t_enc.text_model.embeddings.requires_grad_(True)
+
+         else:
+             unet.eval()
+             for t_enc in text_encoders:
+                 t_enc.eval()
+
+         del t_enc
+
+         accelerator.unwrap_model(network).prepare_grad_etc(text_encoder, unet)
+
+         if not cache_latents:  # the VAE is used during training when latents are not cached, so prepare it
+             vae.requires_grad_(False)
+             vae.eval()
+             vae.to(accelerator.device, dtype=vae_dtype)
+
+         # experimental feature: fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
+         if args.full_fp16:
+             train_util.patch_accelerator_for_fp16_training(accelerator)
+
+         # before resuming make hook for saving/loading to save/load the network weights only
+         def save_model_hook(models, weights, output_dir):
+             # pop weights of other models than network to save only network weights
+             # only main process or deepspeed https://github.com/huggingface/diffusers/issues/2606
+             if accelerator.is_main_process or args.deepspeed:
+                 remove_indices = []
+                 for i, model in enumerate(models):
+                     if not isinstance(model, type(accelerator.unwrap_model(network))):
+                         remove_indices.append(i)
+                 for i in reversed(remove_indices):
+                     if len(weights) > i:
+                         weights.pop(i)
+                 # print(f"save model hook: {len(weights)} weights will be saved")
+
+         def load_model_hook(models, input_dir):
+             # remove models except network
+             remove_indices = []
+             for i, model in enumerate(models):
+                 if not isinstance(model, type(accelerator.unwrap_model(network))):
+                     remove_indices.append(i)
+             for i in reversed(remove_indices):
+                 models.pop(i)
+             # print(f"load model hook: {len(models)} models will be loaded")
+
+         accelerator.register_save_state_pre_hook(save_model_hook)
+         accelerator.register_load_state_pre_hook(load_model_hook)
+
+         # resume training if a saved state is specified
+         train_util.resume_from_local_or_hf_if_specified(accelerator, args)
+
+         # calculate the number of epochs
+         num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+         num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+         if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
+             args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
+
+         # train
+         # TODO: find a way to handle total batch size when there are multiple datasets
+         total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
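+         # e.g. train_batch_size=2 on 2 processes with gradient_accumulation_steps=4
+         # gives an effective batch of 2 * 2 * 4 = 16 samples per optimizer step.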
+
+         accelerator.print("running training / 学習開始")
+         accelerator.print(f"  num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
+         accelerator.print(f"  num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
+         accelerator.print(f"  num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
+         accelerator.print(f"  num epochs / epoch数: {num_train_epochs}")
+         accelerator.print(
+             f"  batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}"
+         )
+         # accelerator.print(f"  total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
+         accelerator.print(f"  gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
+         accelerator.print(f"  total optimization steps / 学習ステップ数: {args.max_train_steps}")
+
+         # TODO refactor metadata creation and move to util
+         metadata = {
+             "ss_session_id": session_id,  # random integer indicating which group of epochs the model came from
+             "ss_training_started_at": training_started_at,  # unix timestamp
+             "ss_output_name": args.output_name,
+             "ss_learning_rate": args.learning_rate,
+             "ss_text_encoder_lr": args.text_encoder_lr,
+             "ss_unet_lr": args.unet_lr,
+             "ss_num_train_images": train_dataset_group.num_train_images,
+             "ss_num_reg_images": train_dataset_group.num_reg_images,
+             "ss_num_batches_per_epoch": len(train_dataloader),
+             "ss_num_epochs": num_train_epochs,
+             "ss_gradient_checkpointing": args.gradient_checkpointing,
+             "ss_gradient_accumulation_steps": args.gradient_accumulation_steps,
+             "ss_max_train_steps": args.max_train_steps,
+             "ss_lr_warmup_steps": args.lr_warmup_steps,
+             "ss_lr_scheduler": args.lr_scheduler,
+             "ss_network_module": args.network_module,
+             "ss_network_dim": args.network_dim,  # None means default because another network than LoRA may have another default dim
+             "ss_network_alpha": args.network_alpha,  # some networks may not have alpha
+             "ss_network_dropout": args.network_dropout,  # some networks may not have dropout
+             "ss_mixed_precision": args.mixed_precision,
+             "ss_full_fp16": bool(args.full_fp16),
+             "ss_v2": bool(args.v2),
+             "ss_base_model_version": model_version,
+             "ss_clip_skip": args.clip_skip,
+             "ss_max_token_length": args.max_token_length,
+             "ss_cache_latents": bool(args.cache_latents),
+             "ss_seed": args.seed,
+             "ss_lowram": args.lowram,
+             "ss_noise_offset": args.noise_offset,
+             "ss_multires_noise_iterations": args.multires_noise_iterations,
+             "ss_multires_noise_discount": args.multires_noise_discount,
+             "ss_adaptive_noise_scale": args.adaptive_noise_scale,
+             "ss_zero_terminal_snr": args.zero_terminal_snr,
+             "ss_training_comment": args.training_comment,  # will not be updated after training
+             "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(),
+             "ss_optimizer": optimizer_name + (f"({optimizer_args})" if len(optimizer_args) > 0 else ""),
+             "ss_max_grad_norm": args.max_grad_norm,
+             "ss_caption_dropout_rate": args.caption_dropout_rate,
+             "ss_caption_dropout_every_n_epochs": args.caption_dropout_every_n_epochs,
+             "ss_caption_tag_dropout_rate": args.caption_tag_dropout_rate,
+             "ss_face_crop_aug_range": args.face_crop_aug_range,
+             "ss_prior_loss_weight": args.prior_loss_weight,
+             "ss_min_snr_gamma": args.min_snr_gamma,
+             "ss_scale_weight_norms": args.scale_weight_norms,
+             "ss_ip_noise_gamma": args.ip_noise_gamma,
+             "ss_debiased_estimation": bool(args.debiased_estimation_loss),
+             "ss_noise_offset_random_strength": args.noise_offset_random_strength,
+             "ss_ip_noise_gamma_random_strength": args.ip_noise_gamma_random_strength,
+             "ss_loss_type": args.loss_type,
+             "ss_huber_schedule": args.huber_schedule,
+             "ss_huber_c": args.huber_c,
+         }
+
+         if use_user_config:
+             # save metadata of multiple datasets
+             # NOTE: pack "ss_datasets" value as json one time
+             # or should also pack nested collections as json?
+             datasets_metadata = []
+             tag_frequency = {}  # merge tag frequency for metadata editor
+             dataset_dirs_info = {}  # merge subset dirs for metadata editor
+
+             for dataset in train_dataset_group.datasets:
+                 is_dreambooth_dataset = isinstance(dataset, DreamBoothDataset)
+                 dataset_metadata = {
+                     "is_dreambooth": is_dreambooth_dataset,
+                     "batch_size_per_device": dataset.batch_size,
+                     "num_train_images": dataset.num_train_images,  # includes repeating
+                     "num_reg_images": dataset.num_reg_images,
+                     "resolution": (dataset.width, dataset.height),
+                     "enable_bucket": bool(dataset.enable_bucket),
+                     "min_bucket_reso": dataset.min_bucket_reso,
+                     "max_bucket_reso": dataset.max_bucket_reso,
+                     "tag_frequency": dataset.tag_frequency,
+                     "bucket_info": dataset.bucket_info,
+                 }
+
+                 subsets_metadata = []
+                 for subset in dataset.subsets:
+                     subset_metadata = {
+                         "img_count": subset.img_count,
+                         "num_repeats": subset.num_repeats,
+                         "color_aug": bool(subset.color_aug),
+                         "flip_aug": bool(subset.flip_aug),
+                         "random_crop": bool(subset.random_crop),
+                         "shuffle_caption": bool(subset.shuffle_caption),
+                         "keep_tokens": subset.keep_tokens,
+                         "keep_tokens_separator": subset.keep_tokens_separator,
+                         "secondary_separator": subset.secondary_separator,
+                         "enable_wildcard": bool(subset.enable_wildcard),
+                         "caption_prefix": subset.caption_prefix,
+                         "caption_suffix": subset.caption_suffix,
+                     }
+
+                     image_dir_or_metadata_file = None
+                     if subset.image_dir:
+                         image_dir = os.path.basename(subset.image_dir)
+                         subset_metadata["image_dir"] = image_dir
+                         image_dir_or_metadata_file = image_dir
+
+                     if is_dreambooth_dataset:
+                         subset_metadata["class_tokens"] = subset.class_tokens
+                         subset_metadata["is_reg"] = subset.is_reg
+                         if subset.is_reg:
+                             image_dir_or_metadata_file = None  # not merging reg dataset
+                     else:
+                         metadata_file = os.path.basename(subset.metadata_file)
+                         subset_metadata["metadata_file"] = metadata_file
+                         image_dir_or_metadata_file = metadata_file  # may overwrite
+
+                     subsets_metadata.append(subset_metadata)
+
+                     # merge dataset dir: not reg subset only
+                     # TODO update additional-network extension to show detailed dataset config from metadata
+                     if image_dir_or_metadata_file is not None:
+                         # datasets may have a certain dir multiple times
+                         v = image_dir_or_metadata_file
+                         i = 2
+                         while v in dataset_dirs_info:
+                             v = image_dir_or_metadata_file + f" ({i})"
+                             i += 1
+                         image_dir_or_metadata_file = v
+
+                         dataset_dirs_info[image_dir_or_metadata_file] = {
+                             "n_repeats": subset.num_repeats,
+                             "img_count": subset.img_count,
+                         }
+
+                 dataset_metadata["subsets"] = subsets_metadata
+                 datasets_metadata.append(dataset_metadata)
+
+                 # merge tag frequency:
+                 for ds_dir_name, ds_freq_for_dir in dataset.tag_frequency.items():
+                     # if a directory is used by multiple datasets, count it only once
+                     # since the number of repeats is specified separately, the tag counts in captions do not
+                     # match how often each tag is actually used in training, so summing the counts
+                     # across datasets here would not mean much
+                     if ds_dir_name in tag_frequency:
+                         continue
+                     tag_frequency[ds_dir_name] = ds_freq_for_dir
+
+             metadata["ss_datasets"] = json.dumps(datasets_metadata)
+             metadata["ss_tag_frequency"] = json.dumps(tag_frequency)
+             metadata["ss_dataset_dirs"] = json.dumps(dataset_dirs_info)
+         else:
+             # conserving backward compatibility when using train_dataset_dir and reg_dataset_dir
+             assert (
+                 len(train_dataset_group.datasets) == 1
+             ), f"There should be a single dataset but {len(train_dataset_group.datasets)} found. This seems to be a bug. / データセットは1個だけ存在するはずですが、実際には{len(train_dataset_group.datasets)}個でした。プログラムのバグかもしれません。"
+
+             dataset = train_dataset_group.datasets[0]
+
+             dataset_dirs_info = {}
+             reg_dataset_dirs_info = {}
+             if use_dreambooth_method:
+                 for subset in dataset.subsets:
+                     info = reg_dataset_dirs_info if subset.is_reg else dataset_dirs_info
+                     info[os.path.basename(subset.image_dir)] = {"n_repeats": subset.num_repeats, "img_count": subset.img_count}
+             else:
+                 for subset in dataset.subsets:
+                     dataset_dirs_info[os.path.basename(subset.metadata_file)] = {
+                         "n_repeats": subset.num_repeats,
+                         "img_count": subset.img_count,
+                     }
+
+             metadata.update(
+                 {
+                     "ss_batch_size_per_device": args.train_batch_size,
+                     "ss_total_batch_size": total_batch_size,
+                     "ss_resolution": args.resolution,
+                     "ss_color_aug": bool(args.color_aug),
+                     "ss_flip_aug": bool(args.flip_aug),
+                     "ss_random_crop": bool(args.random_crop),
+                     "ss_shuffle_caption": bool(args.shuffle_caption),
+                     "ss_enable_bucket": bool(dataset.enable_bucket),
+                     "ss_bucket_no_upscale": bool(dataset.bucket_no_upscale),
+                     "ss_min_bucket_reso": dataset.min_bucket_reso,
+                     "ss_max_bucket_reso": dataset.max_bucket_reso,
+                     "ss_keep_tokens": args.keep_tokens,
+                     "ss_dataset_dirs": json.dumps(dataset_dirs_info),
+                     "ss_reg_dataset_dirs": json.dumps(reg_dataset_dirs_info),
+                     "ss_tag_frequency": json.dumps(dataset.tag_frequency),
+                     "ss_bucket_info": json.dumps(dataset.bucket_info),
+                 }
+             )
+
+         # add extra args
+         if args.network_args:
+             metadata["ss_network_args"] = json.dumps(net_kwargs)
+
+         # model name and hash
+         if args.pretrained_model_name_or_path is not None:
+             sd_model_name = args.pretrained_model_name_or_path
+             if os.path.exists(sd_model_name):
+                 metadata["ss_sd_model_hash"] = train_util.model_hash(sd_model_name)
+                 metadata["ss_new_sd_model_hash"] = train_util.calculate_sha256(sd_model_name)
+                 sd_model_name = os.path.basename(sd_model_name)
+             metadata["ss_sd_model_name"] = sd_model_name
+
+         if args.vae is not None:
+             vae_name = args.vae
+             if os.path.exists(vae_name):
+                 metadata["ss_vae_hash"] = train_util.model_hash(vae_name)
+                 metadata["ss_new_vae_hash"] = train_util.calculate_sha256(vae_name)
+                 vae_name = os.path.basename(vae_name)
+             metadata["ss_vae_name"] = vae_name
+
+         metadata = {k: str(v) for k, v in metadata.items()}
+
+         # make minimum metadata for filtering
+         minimum_metadata = {}
+         for key in train_util.SS_METADATA_MINIMUM_KEYS:
+             if key in metadata:
+                 minimum_metadata[key] = metadata[key]
+
+         progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
+         global_step = 0
+
+         noise_scheduler = DDPMScheduler(
+             beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
+         )
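+         # these beta-schedule values (scaled-linear, 1000 steps) match the schedule used to
+         # train Stable Diffusion itself, so training-time noising is consistent with the base model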
+         prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
+         if args.zero_terminal_snr:
+             custom_train_functions.fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)
+
+         if accelerator.is_main_process:
+             init_kwargs = {}
+             if args.wandb_run_name:
+                 init_kwargs["wandb"] = {"name": args.wandb_run_name}
+             if args.log_tracker_config is not None:
+                 init_kwargs = toml.load(args.log_tracker_config)
+             accelerator.init_trackers(
+                 "network_train" if args.log_tracker_name is None else args.log_tracker_name, init_kwargs=init_kwargs
+             )
+
+         loss_recorder = train_util.LossRecorder()
+         del train_dataset_group
+
+         # callback for step start
+         if hasattr(accelerator.unwrap_model(network), "on_step_start"):
+             on_step_start = accelerator.unwrap_model(network).on_step_start
+         else:
+             on_step_start = lambda *args, **kwargs: None
+
+         # function for saving/removing
+         def save_model(ckpt_name, unwrapped_nw, steps, epoch_no, force_sync_upload=False):
+             os.makedirs(args.output_dir, exist_ok=True)
+             ckpt_file = os.path.join(args.output_dir, ckpt_name)
+
+             accelerator.print(f"\nsaving checkpoint: {ckpt_file}")
+             metadata["ss_training_finished_at"] = str(time.time())
+             metadata["ss_steps"] = str(steps)
+             metadata["ss_epoch"] = str(epoch_no)
+
+             metadata_to_save = minimum_metadata if args.no_metadata else metadata
+             sai_metadata = train_util.get_sai_model_spec(None, args, self.is_sdxl, True, False)
+             metadata_to_save.update(sai_metadata)
+
+             unwrapped_nw.save_weights(ckpt_file, save_dtype, metadata_to_save)
+             if args.huggingface_repo_id is not None:
+                 huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
+
+         def remove_model(old_ckpt_name):
+             old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
+             if os.path.exists(old_ckpt_file):
+                 accelerator.print(f"removing old checkpoint: {old_ckpt_file}")
+                 os.remove(old_ckpt_file)
+
+         # For --sample_at_first
+         self.sample_images(accelerator, args, 0, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+         # training loop
+         for epoch in range(num_train_epochs):
+             accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}")
+             current_epoch.value = epoch + 1
+
+             metadata["ss_epoch"] = str(epoch + 1)
+
+             accelerator.unwrap_model(network).on_epoch_start(text_encoder, unet)
+
+             for step, batch in enumerate(train_dataloader):
+                 current_step.value = global_step
+                 with accelerator.accumulate(training_model):
+                     on_step_start(text_encoder, unet)
+
+                     if "latents" in batch and batch["latents"] is not None:
+                         latents = batch["latents"].to(accelerator.device).to(dtype=weight_dtype)
+                     else:
+                         with torch.no_grad():
+                             # encode images into latents
+                             latents = vae.encode(batch["images"].to(dtype=vae_dtype)).latent_dist.sample().to(dtype=weight_dtype)
+
+                             # if NaNs are found, warn and replace them with zeros
+                             if torch.any(torch.isnan(latents)):
+                                 accelerator.print("NaN found in latents, replacing with zeros")
+                                 latents = torch.nan_to_num(latents, 0, out=latents)
+                     latents = latents * self.vae_scale_factor
+
+                     # get multiplier for each sample
+                     if network_has_multiplier:
+                         multipliers = batch["network_multipliers"]
+                         # if all multipliers are same, use single multiplier
+                         if torch.all(multipliers == multipliers[0]):
+                             multipliers = multipliers[0].item()
+                         else:
+                             raise NotImplementedError("multipliers for each sample is not supported yet")
+                         # print(f"set multiplier: {multipliers}")
+                         accelerator.unwrap_model(network).set_multiplier(multipliers)
+
+                     with torch.set_grad_enabled(train_text_encoder), accelerator.autocast():
+                         # Get the text embedding for conditioning
+                         if args.weighted_captions:
+                             text_encoder_conds = get_weighted_text_embeddings(
+                                 tokenizer,
+                                 text_encoder,
+                                 batch["captions"],
+                                 accelerator.device,
+                                 args.max_token_length // 75 if args.max_token_length else 1,
+                                 clip_skip=args.clip_skip,
+                             )
+                         else:
+                             text_encoder_conds = self.get_text_cond(
+                                 args, accelerator, batch, tokenizers, text_encoders, weight_dtype
+                             )
+
+                     # Sample noise, sample a random timestep for each image, and add noise to the latents,
+                     # with noise offset and/or multires noise if specified
+                     noise, noisy_latents, timesteps, huber_c = train_util.get_noise_noisy_latents_and_timesteps(
+                         args, noise_scheduler, latents
+                     )
+
+                     # ensure the hidden state will require grad
+                     if args.gradient_checkpointing:
+                         for x in noisy_latents:
+                             x.requires_grad_(True)
+                         for t in text_encoder_conds:
+                             t.requires_grad_(True)
+
+                     # Predict the noise residual
+                     with accelerator.autocast():
+                         noise_pred = self.call_unet(
+                             args,
+                             accelerator,
+                             unet,
+                             noisy_latents.requires_grad_(train_unet),
+                             timesteps,
+                             text_encoder_conds,
+                             batch,
+                             weight_dtype,
+                         )
+
+                     if args.v_parameterization:
+                         # v-parameterization training
+                         target = noise_scheduler.get_velocity(latents, noise, timesteps)
+                     else:
+                         target = noise
+
+                     loss = train_util.conditional_loss(
+                         noise_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c
+                     )
+                     if args.masked_loss:
+                         loss = apply_masked_loss(loss, batch)
+                     loss = loss.mean([1, 2, 3])
+
+                     loss_weights = batch["loss_weights"]  # per-sample weights
+                     loss = loss * loss_weights
+
+                     if args.min_snr_gamma:
+                         loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma, args.v_parameterization)
+                     if args.scale_v_pred_loss_like_noise_pred:
+                         loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
+                     if args.v_pred_like_loss:
+                         loss = add_v_prediction_like_loss(loss, timesteps, noise_scheduler, args.v_pred_like_loss)
+                     if args.debiased_estimation_loss:
+                         loss = apply_debiased_estimation(loss, timesteps, noise_scheduler)
+
+                     loss = loss.mean()  # mean over the batch, so no need to divide by batch_size
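+                     # loss pipeline, in order: per-pixel conditional loss (L2/Huber) -> optional mask ->
+                     # spatial mean per sample -> per-sample weights -> optional SNR-based reweighting
+                     # (min-SNR, v-pred scaling, debiased estimation) -> batch mean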
+
+                     accelerator.backward(loss)
+                     if accelerator.sync_gradients:
+                         self.all_reduce_network(accelerator, network)  # sync DDP grad manually
+                         if args.max_grad_norm != 0.0:
+                             params_to_clip = accelerator.unwrap_model(network).get_trainable_params()
+                             accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
+
+                     optimizer.step()
+                     lr_scheduler.step()
+                     optimizer.zero_grad(set_to_none=True)
+
+                 if args.scale_weight_norms:
+                     keys_scaled, mean_norm, maximum_norm = accelerator.unwrap_model(network).apply_max_norm_regularization(
+                         args.scale_weight_norms, accelerator.device
+                     )
+                     max_mean_logs = {"Keys Scaled": keys_scaled, "Average key norm": mean_norm}
+                 else:
+                     keys_scaled, mean_norm, maximum_norm = None, None, None
+
+                 # Checks if the accelerator has performed an optimization step behind the scenes
+                 if accelerator.sync_gradients:
+                     progress_bar.update(1)
+                     global_step += 1
+
+                     self.sample_images(accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+                     # save the model at every N steps if specified
+                     if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
+                         accelerator.wait_for_everyone()
+                         if accelerator.is_main_process:
+                             ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
+                             save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch)
+
+                             if args.save_state:
+                                 train_util.save_and_remove_state_stepwise(args, accelerator, global_step)
+
+                             remove_step_no = train_util.get_remove_step_no(args, global_step)
+                             if remove_step_no is not None:
+                                 remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
+                                 remove_model(remove_ckpt_name)
+
+                 current_loss = loss.detach().item()
+                 loss_recorder.add(epoch=epoch, step=step, loss=current_loss)
+                 avr_loss: float = loss_recorder.moving_average
+                 logs = {"avr_loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
+                 progress_bar.set_postfix(**logs)
+
+                 if args.scale_weight_norms:
+                     progress_bar.set_postfix(**{**max_mean_logs, **logs})
+
+                 if args.logging_dir is not None:
+                     logs = self.generate_step_logs(args, current_loss, avr_loss, lr_scheduler, keys_scaled, mean_norm, maximum_norm)
+                     accelerator.log(logs, step=global_step)
+
+                 if global_step >= args.max_train_steps:
+                     break
+
+             if args.logging_dir is not None:
+                 logs = {"loss/epoch": loss_recorder.moving_average}
+                 accelerator.log(logs, step=epoch + 1)
+
+             accelerator.wait_for_everyone()
+
+             # save the model at every N epochs if specified
+             if args.save_every_n_epochs is not None:
+                 saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
+                 if is_main_process and saving:
+                     ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
+                     save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch + 1)
+
+                     remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
+                     if remove_epoch_no is not None:
+                         remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
+                         remove_model(remove_ckpt_name)
+
+                     if args.save_state:
+                         train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)
+
+             self.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
+
+             # end of epoch
+
+         # metadata["ss_epoch"] = str(num_train_epochs)
+         metadata["ss_training_finished_at"] = str(time.time())
+
+         if is_main_process:
+             network = accelerator.unwrap_model(network)
+
+         accelerator.end_training()
+
+         if is_main_process and (args.save_state or args.save_state_on_train_end):
+             train_util.save_state_on_train_end(args, accelerator)
+
+         if is_main_process:
+             ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
+             save_model(ckpt_name, network, global_step, num_train_epochs, force_sync_upload=True)
+
+             logger.info("model saved.")
+
+
+ def setup_parser() -> argparse.ArgumentParser:
+     parser = argparse.ArgumentParser()
+
+     add_logging_arguments(parser)
+     train_util.add_sd_models_arguments(parser)
+     train_util.add_dataset_arguments(parser, True, True, True)
+     train_util.add_training_arguments(parser, True)
+     train_util.add_masked_loss_arguments(parser)
+     deepspeed_utils.add_deepspeed_arguments(parser)
+     train_util.add_optimizer_arguments(parser)
+     config_util.add_config_arguments(parser)
+     custom_train_functions.add_custom_train_arguments(parser)
+
+     parser.add_argument(
+         "--no_metadata", action="store_true", help="do not save metadata in output model / メタデータを出力先モデルに保存しない"
+     )
+     parser.add_argument(
+         "--save_model_as",
+         type=str,
+         default="safetensors",
+         choices=[None, "ckpt", "pt", "safetensors"],
+         help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)",
+     )
+
+     parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率")
+     parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率")
+
+     parser.add_argument(
+         "--network_weights", type=str, default=None, help="pretrained weights for network / 学習するネットワークの初期重み"
+     )
+     parser.add_argument(
+         "--network_module", type=str, default=None, help="network module to train / 学習対象のネットワークのモジュール"
+     )
+     parser.add_argument(
+         "--network_dim",
+         type=int,
+         default=None,
+         help="network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)",
+     )
+     parser.add_argument(
+         "--network_alpha",
+         type=float,
+         default=1,
+         help="alpha for LoRA weight scaling, default 1 (same as network_dim for same behavior as old version) / LoRaの重み調整のalpha値、デフォルト1(旧バージョンと同じ動作をするにはnetwork_dimと同じ値を指定)",
+     )
+     parser.add_argument(
+         "--network_dropout",
+         type=float,
+         default=None,
+         help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
+     )
+     parser.add_argument(
+         "--network_args",
+         type=str,
+         default=None,
+         nargs="*",
+         help="additional arguments for network (key=value) / ネットワークへの追加の引数",
+     )
+     parser.add_argument(
+         "--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する"
+     )
+     parser.add_argument(
+         "--network_train_text_encoder_only",
+         action="store_true",
+         help="only training Text Encoder part / Text Encoder関連部分のみ学習する",
+     )
+     parser.add_argument(
+         "--training_comment",
+         type=str,
+         default=None,
+         help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列",
+     )
+     parser.add_argument(
+         "--dim_from_weights",
+         action="store_true",
+         help="automatically determine dim (rank) from network_weights / dim (rank)をnetwork_weightsで指定した重みから自動で決定する",
+     )
+     parser.add_argument(
+         "--scale_weight_norms",
+         type=float,
+         default=None,
+         help="Scale the weight of each key pair to help prevent overtraining via exploding gradients. (1 is a good starting point) / 重みの値をスケーリングして勾配爆発を防ぐ(1が初期値としては適当)",
+     )
+     parser.add_argument(
+         "--base_weights",
+         type=str,
+         default=None,
+         nargs="*",
+         help="network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みファイル",
+     )
+     parser.add_argument(
+         "--base_weights_multiplier",
+         type=float,
+         default=None,
+         nargs="*",
+         help="multiplier for network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みの倍率",
+     )
+     parser.add_argument(
+         "--no_half_vae",
+         action="store_true",
+         help="do not use fp16/bf16 VAE in mixed precision (use float VAE) / mixed precisionでも fp16/bf16 VAEを使わずfloat VAEを使う",
+     )
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = setup_parser()
+
+     args = parser.parse_args()
+     train_util.verify_command_line_training_args(args)
+     args = train_util.read_config_from_file(args, parser)
+
+     trainer = NetworkTrainer()
+     trainer.train(args)
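
For reference, a minimal single-GPU LoRA training invocation of this script might look like the following sketch (paths, step counts and hyperparameters are illustrative placeholders; the flags shown are defined by setup_parser or the train_util argument helpers):

accelerate launch train_network.py \
    --pretrained_model_name_or_path /path/to/base_model.safetensors \
    --train_data_dir /path/to/train_images \
    --output_dir /path/to/output --output_name my_lora \
    --network_module networks.lora --network_dim 16 --network_alpha 8 \
    --learning_rate 1e-4 --max_train_steps 2000 \
    --mixed_precision fp16 --save_model_as safetensors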