{
    "best_metric": null,
    "best_model_checkpoint": null,
    "epoch": 0.0023797217956488276,
    "eval_steps": 2000,
    "global_step": 3200,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
        {
            "epoch": 0.0,
            "grad_norm": 8.451894760131836,
            "learning_rate": 1.9999999959757473e-05,
            "loss": 1.835,
            "step": 200
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.7373883724212646,
            "learning_rate": 1.9999999832252933e-05,
            "loss": 1.6278,
            "step": 400
        },
        {
            "epoch": 0.0,
            "grad_norm": 3.7490854263305664,
            "learning_rate": 1.9999999617416517e-05,
            "loss": 1.6314,
            "step": 600
        },
        {
            "epoch": 0.0,
            "grad_norm": 10.143038749694824,
            "learning_rate": 1.999999931524823e-05,
            "loss": 1.5416,
            "step": 800
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.783194065093994,
            "learning_rate": 1.999999892574807e-05,
            "loss": 1.5775,
            "step": 1000
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.1446919441223145,
            "learning_rate": 1.9999998448916044e-05,
            "loss": 1.6922,
            "step": 1200
        },
        {
            "epoch": 0.0,
            "grad_norm": 3.6168997287750244,
            "learning_rate": 1.9999997884752155e-05,
            "loss": 1.6211,
            "step": 1400
        },
        {
            "epoch": 0.0,
            "grad_norm": 4.068266868591309,
            "learning_rate": 1.9999997233256404e-05,
            "loss": 1.6001,
            "step": 1600
        },
        {
            "epoch": 0.0,
            "grad_norm": 3.046320676803589,
            "learning_rate": 1.9999996494428805e-05,
            "loss": 1.5682,
            "step": 1800
        },
        {
            "epoch": 0.0,
            "grad_norm": 4.574249267578125,
            "learning_rate": 1.9999995668269356e-05,
            "loss": 1.5658,
            "step": 2000
        },
        {
            "epoch": 0.0,
            "grad_norm": 4.401742935180664,
            "learning_rate": 1.999999475956276e-05,
            "loss": 1.6152,
            "step": 2200
        },
        {
            "epoch": 0.0,
            "grad_norm": 4.141517162322998,
            "learning_rate": 1.9999993759176304e-05,
            "loss": 1.564,
            "step": 2400
        },
        {
            "epoch": 0.0,
            "grad_norm": 1.8213422298431396,
            "learning_rate": 1.9999992671458023e-05,
            "loss": 1.5586,
            "step": 2600
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.3063032627105713,
            "learning_rate": 1.999999149640793e-05,
            "loss": 1.6118,
            "step": 2800
        },
        {
            "epoch": 0.0,
            "grad_norm": 3.5887880325317383,
            "learning_rate": 1.9999990234026036e-05,
            "loss": 1.586,
            "step": 3000
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.8140385150909424,
            "learning_rate": 1.9999988884312347e-05,
            "loss": 1.6221,
            "step": 3200
        }
    ],
    "logging_steps": 200,
    "max_steps": 6723475,
    "num_input_tokens_seen": 0,
    "num_train_epochs": 5,
    "save_steps": 200,
    "total_flos": 4.180089275793408e+16,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null
}