# Training mode and base model
model_train_type = "sd-lora"
pretrained_model_name_or_path = "E:/Softwares/lora-scripts/sd-models/animefull-final-pruned.safetensors"
v2 = false

# Dataset and aspect-ratio bucketing
train_data_dir = "E:/Softwares/lora-scripts/train/Matchach"
prior_loss_weight = 1
resolution = "512,512"
enable_bucket = true
min_bucket_reso = 256
max_bucket_reso = 1024
bucket_reso_steps = 64

# Output and checkpointing
output_name = "matchach_1"
output_dir = "./output"
save_model_as = "safetensors"
save_precision = "fp16"
save_every_n_epochs = 2

# Training schedule, learning rates, and optimizer
max_train_epochs = 10
train_batch_size = 1
gradient_checkpointing = false
network_train_unet_only = false
network_train_text_encoder_only = false
learning_rate = 0.0001
unet_lr = 0.0001
text_encoder_lr = 0.00001
lr_scheduler = "cosine_with_restarts"
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
optimizer_type = "AdamW8bit"

# LyCORIS network (LoCon algorithm)
network_module = "lycoris.kohya"
network_dim = 64
network_alpha = 32

# Logging
log_with = "tensorboard"
logging_dir = "./logs"

# Captions
caption_extension = ".txt"
shuffle_caption = true
keep_tokens = 0
max_token_length = 255

# Reproducibility, precision, and dataloader performance
seed = 1337
clip_skip = 2
mixed_precision = "fp16"
xformers = true
lowram = false
cache_latents = true
cache_latents_to_disk = true
persistent_data_loader_workers = true

# LyCORIS-specific arguments (conv-layer rank/alpha, dropout, algorithm)
network_args = [ "conv_dim=4", "conv_alpha=1", "dropout=0", "algo=locon" ]
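
A minimal sketch of how this config could be sanity-checked before launching training. It only uses Python 3.11+'s standard-library tomllib; the filename "train_config.toml" is an assumption (the repo view above does not show the file's name), and the checks shown are illustrative, not part of lora-scripts itself.

# Sketch: load the TOML above and verify a few settings before training.
import tomllib
from pathlib import Path

with open("train_config.toml", "rb") as f:  # tomllib requires binary mode
    cfg = tomllib.load(f)

# The two absolute paths in the config must exist on the training machine.
for key in ("pretrained_model_name_or_path", "train_data_dir"):
    if not Path(cfg[key]).exists():
        print(f"warning: {key} -> {cfg[key]} not found")

# Common LoRA convention: alpha should not exceed dim (here 32 <= 64,
# giving an effective scale of alpha/dim = 0.5).
assert cfg["network_alpha"] <= cfg["network_dim"], "network_alpha exceeds network_dim"

print(f"training {cfg['output_name']} for {cfg['max_train_epochs']} epochs "
      f"at lr={cfg['learning_rate']} with {cfg['optimizer_type']}")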