# File size: 1,450 Bytes
# Commit: bc871bd
---
# MUSE VAE trainer configuration.
# Style note: all string scalars are single-quoted defensively (only '' is
# special inside single quotes, so the Windows backslash paths below are
# preserved byte-for-byte). Numbers, booleans, and nulls stay plain.

# --- model ---
total_params: 94173316  # informational; presumably recorded by the trainer — confirm
dim: 128
vq_codebook_size: 8192
vq_codebook_dim: 256
seq_len: 1024
channels: 3
layers: 4
discr_layers: 4

# --- training schedule ---
image_size: 256
num_train_steps: -1  # NOTE(review): -1 presumably means "no step cap, use num_epochs" — confirm with trainer
num_epochs: 200000
batch_size: 1
lr: 0.0001
lr_warmup_steps: 1
lr_scheduler: 'cosine_with_restarts'
scheduler_power: 1.0
num_cycles: 600
gradient_accumulation_steps: 2000
max_grad_norm: null  # null: gradient clipping disabled
discr_max_grad_norm: null
apply_grad_penalty_every: 4
seed: 42
valid_frac: 0.05

# --- checkpointing / results ---
save_results_every: 100
save_model_every: 1000
only_save_last_checkpoint: false
checkpoint_limit: null
clear_previous_experiments: false
results_dir: 'results/Muse-v0.11'
resume_path: 'results\Muse-v0.11\vae.653000.pt'  # Windows backslash path; single quotes keep it literal
latest_checkpoint: true
do_not_save_config: false

# --- EMA ---
use_ema: false
ema_beta: 0.995
ema_update_after_step: 1
ema_update_every: 1

# --- data / augmentation ---
validation_image_scale: 1.0
no_center_crop: false
no_flip: false
random_crop: true
dataset_save_path: 'E:/cached_datasets/HF'
dataset_name: null
streaming: false
train_data_dir: 'F:\Hydrus Files/'  # contains a space and a backslash — quoting is deliberate
image_column: 'image'
caption_column: 'caption'
cond_drop_prob: 0.5
cache_path: null
no_cache: true
exclude_folders: null
webdataset: null
hf_split_name: 'train'

# --- optimizer ---
optimizer: 'Adam'
weight_decay: 0.001
use_8bit_adam: false
use_l2_recon_loss: false

# --- external VQGAN (taming) ---
taming_model_path: null
taming_config_path: null

# --- logging ---
log_with: 'wandb'
mixed_precision: 'no'  # quotes required: bare `no` parses as boolean false (Norway problem)
logging_dir: null
debug: false
config_path: null
validation_folder_at_end_of_epoch: 'samples/validation'
project_name: 'muse_vae'
run_name: null
wandb_user: null