{
  "micro_batch_size": 1,
  "gradient_accumulation_steps": 1,
  "num_train_epochs": 3,
  "learning_rate": 0.0003,
  "cutoff_len": 512,
  "val_set_size": 0,
  "lora_r": 4,
  "lora_alpha": 16,
  "lora_dropout": 0.03,
  "lora_target_modules": [
    "q_proj",
    "v_proj"
  ],
  "lora_modules_to_save": [],
  "train_on_inputs": true,
  "group_by_length": false,
  "load_in_8bit": false,
  "fp16": true,
  "bf16": false,
  "gradient_checkpointing": false,
  "save_steps": 250,
  "save_total_limit": 2,
  "logging_steps": 10,
  "additional_training_arguments": null,
  "additional_lora_config": null,
  "resume_from_checkpoint": "/root/LLaMA-LoRA-Tuner/data/lora_models/ru-llama-prefinish/checkpoint-18500"
}
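
For reference, here is a minimal sketch of how these fields map onto the Hugging Face peft and transformers objects that typically back such a config. The exact wiring inside LLaMA-LoRA-Tuner is an assumption, as is the output_dir value, which is not part of the JSON above.

# Minimal sketch: mapping the JSON fields above onto peft / transformers
# objects. The output_dir value and this exact wiring are illustrative
# assumptions; LLaMA-LoRA-Tuner's internals may differ.
from peft import LoraConfig
from transformers import TrainingArguments

lora_config = LoraConfig(
    r=4,                                  # "lora_r"
    lora_alpha=16,                        # "lora_alpha"
    lora_dropout=0.03,                    # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # "lora_target_modules"
    task_type="CAUSAL_LM",
)

training_args = TrainingArguments(
    output_dir="output",                  # assumed; not in the JSON above
    per_device_train_batch_size=1,        # "micro_batch_size"
    gradient_accumulation_steps=1,
    num_train_epochs=3,
    learning_rate=3e-4,                   # 0.0003
    fp16=True,
    bf16=False,
    gradient_checkpointing=False,
    group_by_length=False,
    save_steps=250,
    save_total_limit=2,
    logging_steps=10,
)

The "resume_from_checkpoint" path would then typically be handed to the trainer at launch, e.g. trainer.train(resume_from_checkpoint="/root/LLaMA-LoRA-Tuner/data/lora_models/ru-llama-prefinish/checkpoint-18500"), so training continues from step 18500 rather than starting over.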