|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.876543209876543,
  "eval_steps": 500,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6172839506172839,
      "grad_norm": 14.113041877746582,
      "learning_rate": 9.953000000000001e-06,
      "loss": 9.3136,
      "step": 50
    },
    {
      "epoch": 1.2345679012345678,
      "grad_norm": 11.032143592834473,
      "learning_rate": 9.903e-06,
      "loss": 8.8439,
      "step": 100
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 21.83379554748535,
      "learning_rate": 9.853e-06,
      "loss": 8.9582,
      "step": 150
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 13.823728561401367,
      "learning_rate": 9.803e-06,
      "loss": 8.5429,
      "step": 200
    },
    {
      "epoch": 3.0864197530864197,
      "grad_norm": 14.090177536010742,
      "learning_rate": 9.753e-06,
      "loss": 8.0873,
      "step": 250
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 16.695327758789062,
      "learning_rate": 9.703000000000002e-06,
      "loss": 7.5718,
      "step": 300
    },
    {
      "epoch": 4.320987654320987,
      "grad_norm": 12.39268970489502,
      "learning_rate": 9.653e-06,
      "loss": 7.2107,
      "step": 350
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 20.64848518371582,
      "learning_rate": 9.603000000000001e-06,
      "loss": 6.633,
      "step": 400
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 10.930610656738281,
      "learning_rate": 9.553000000000002e-06,
      "loss": 6.3085,
      "step": 450
    },
    {
      "epoch": 6.172839506172839,
      "grad_norm": 7.421260833740234,
      "learning_rate": 9.503e-06,
      "loss": 5.9761,
      "step": 500
    },
    {
      "epoch": 6.790123456790123,
      "grad_norm": 19.653493881225586,
      "learning_rate": 9.453e-06,
      "loss": 6.0252,
      "step": 550
    },
    {
      "epoch": 7.407407407407407,
      "grad_norm": 10.195016860961914,
      "learning_rate": 9.403000000000001e-06,
      "loss": 5.9531,
      "step": 600
    },
    {
      "epoch": 8.024691358024691,
      "grad_norm": 15.588345527648926,
      "learning_rate": 9.353000000000002e-06,
      "loss": 5.979,
      "step": 650
    },
    {
      "epoch": 8.641975308641975,
      "grad_norm": 11.010184288024902,
      "learning_rate": 9.303e-06,
      "loss": 5.9684,
      "step": 700
    },
    {
      "epoch": 9.25925925925926,
      "grad_norm": 12.536541938781738,
      "learning_rate": 9.253000000000001e-06,
      "loss": 6.0309,
      "step": 750
    },
    {
      "epoch": 9.876543209876543,
      "grad_norm": 10.15280818939209,
      "learning_rate": 9.203000000000002e-06,
      "loss": 5.7873,
      "step": 800
    }
  ],
  "logging_steps": 50,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 124,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.057945329664e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|