|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 12300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "grad_norm": 29.442007064819336,
      "learning_rate": 4.796747967479675e-05,
      "loss": 1.1714,
      "step": 500
    },
    {
      "epoch": 0.81,
      "grad_norm": 17.30375099182129,
      "learning_rate": 4.59349593495935e-05,
      "loss": 0.5578,
      "step": 1000
    },
    {
      "epoch": 1.22,
      "grad_norm": 9.899393081665039,
      "learning_rate": 4.390243902439025e-05,
      "loss": 0.4015,
      "step": 1500
    },
    {
      "epoch": 1.63,
      "grad_norm": 8.312630653381348,
      "learning_rate": 4.186991869918699e-05,
      "loss": 0.3135,
      "step": 2000
    },
    {
      "epoch": 2.03,
      "grad_norm": 7.51059627532959,
      "learning_rate": 3.983739837398374e-05,
      "loss": 0.2809,
      "step": 2500
    },
    {
      "epoch": 2.44,
      "grad_norm": 5.824585914611816,
      "learning_rate": 3.780487804878049e-05,
      "loss": 0.2045,
      "step": 3000
    },
    {
      "epoch": 2.85,
      "grad_norm": 20.64141845703125,
      "learning_rate": 3.577235772357724e-05,
      "loss": 0.1899,
      "step": 3500
    },
    {
      "epoch": 3.25,
      "grad_norm": 4.098996162414551,
      "learning_rate": 3.373983739837399e-05,
      "loss": 0.156,
      "step": 4000
    },
    {
      "epoch": 3.66,
      "grad_norm": 11.682610511779785,
      "learning_rate": 3.170731707317073e-05,
      "loss": 0.1367,
      "step": 4500
    },
    {
      "epoch": 4.07,
      "grad_norm": 1.980808138847351,
      "learning_rate": 2.9674796747967482e-05,
      "loss": 0.1321,
      "step": 5000
    },
    {
      "epoch": 4.47,
      "grad_norm": 21.24442481994629,
      "learning_rate": 2.764227642276423e-05,
      "loss": 0.1024,
      "step": 5500
    },
    {
      "epoch": 4.88,
      "grad_norm": 10.403449058532715,
      "learning_rate": 2.5609756097560977e-05,
      "loss": 0.0965,
      "step": 6000
    },
    {
      "epoch": 5.28,
      "grad_norm": 6.274624824523926,
      "learning_rate": 2.3577235772357724e-05,
      "loss": 0.0811,
      "step": 6500
    },
    {
      "epoch": 5.69,
      "grad_norm": 18.304744720458984,
      "learning_rate": 2.1544715447154475e-05,
      "loss": 0.0787,
      "step": 7000
    },
    {
      "epoch": 6.1,
      "grad_norm": 6.117405891418457,
      "learning_rate": 1.9512195121951222e-05,
      "loss": 0.0636,
      "step": 7500
    },
    {
      "epoch": 6.5,
      "grad_norm": 6.909726619720459,
      "learning_rate": 1.747967479674797e-05,
      "loss": 0.0578,
      "step": 8000
    },
    {
      "epoch": 6.91,
      "grad_norm": 7.566128253936768,
      "learning_rate": 1.5447154471544717e-05,
      "loss": 0.0564,
      "step": 8500
    },
    {
      "epoch": 7.32,
      "grad_norm": 3.8233773708343506,
      "learning_rate": 1.3414634146341466e-05,
      "loss": 0.0474,
      "step": 9000
    },
    {
      "epoch": 7.72,
      "grad_norm": 9.988646507263184,
      "learning_rate": 1.1382113821138211e-05,
      "loss": 0.0394,
      "step": 9500
    },
    {
      "epoch": 8.13,
      "grad_norm": 7.508295059204102,
      "learning_rate": 9.34959349593496e-06,
      "loss": 0.0339,
      "step": 10000
    },
    {
      "epoch": 8.54,
      "grad_norm": 8.255553245544434,
      "learning_rate": 7.317073170731707e-06,
      "loss": 0.0279,
      "step": 10500
    },
    {
      "epoch": 8.94,
      "grad_norm": 0.14910294115543365,
      "learning_rate": 5.2845528455284555e-06,
      "loss": 0.0248,
      "step": 11000
    },
    {
      "epoch": 9.35,
      "grad_norm": 0.014114579185843468,
      "learning_rate": 3.2520325203252037e-06,
      "loss": 0.018,
      "step": 11500
    },
    {
      "epoch": 9.76,
      "grad_norm": 0.12875856459140778,
      "learning_rate": 1.2195121951219514e-06,
      "loss": 0.0182,
      "step": 12000
    },
    {
      "epoch": 10.0,
      "step": 12300,
      "total_flos": 7.078965077743022e+16,
      "train_loss": 0.17480758806554283,
      "train_runtime": 299478.6207,
      "train_samples_per_second": 1.314,
      "train_steps_per_second": 0.041
    }
  ],
  "logging_steps": 500,
  "max_steps": 12300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 7.078965077743022e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|
|