{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.98989898989899,
  "eval_steps": 500,
  "global_step": 1235,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4,
      "grad_norm": 4.1816935539245605,
      "learning_rate": 9.206477732793523e-06,
      "loss": 2.284,
      "step": 100
    },
    {
      "epoch": 0.81,
      "grad_norm": 7.234564304351807,
      "learning_rate": 8.39676113360324e-06,
      "loss": 1.1378,
      "step": 200
    },
    {
      "epoch": 1.21,
      "grad_norm": 4.295170783996582,
      "learning_rate": 7.587044534412956e-06,
      "loss": 0.7549,
      "step": 300
    },
    {
      "epoch": 1.62,
      "grad_norm": 2.47021746635437,
      "learning_rate": 6.777327935222673e-06,
      "loss": 0.545,
      "step": 400
    },
    {
      "epoch": 2.02,
      "grad_norm": 4.9068498611450195,
      "learning_rate": 5.9676113360323896e-06,
      "loss": 0.4039,
      "step": 500
    },
    {
      "epoch": 2.02,
      "eval_accuracy": 0.8949494957923889,
      "eval_loss": 0.3197959065437317,
      "eval_runtime": 43.5773,
      "eval_samples_per_second": 11.359,
      "eval_steps_per_second": 2.846,
      "step": 500
    },
    {
      "epoch": 2.42,
      "grad_norm": 10.178821563720703,
      "learning_rate": 5.157894736842106e-06,
      "loss": 0.3572,
      "step": 600
    },
    {
      "epoch": 2.83,
      "grad_norm": 22.03152847290039,
      "learning_rate": 4.348178137651822e-06,
      "loss": 0.3237,
      "step": 700
    },
    {
      "epoch": 3.23,
      "grad_norm": 4.856521129608154,
      "learning_rate": 3.538461538461539e-06,
      "loss": 0.2635,
      "step": 800
    },
    {
      "epoch": 3.64,
      "grad_norm": 4.2341766357421875,
      "learning_rate": 2.7368421052631583e-06,
      "loss": 0.2138,
      "step": 900
    },
    {
      "epoch": 4.04,
      "grad_norm": 0.4156922996044159,
      "learning_rate": 1.9271255060728746e-06,
      "loss": 0.2195,
      "step": 1000
    },
    {
      "epoch": 4.04,
      "eval_accuracy": 0.9333333373069763,
      "eval_loss": 0.2362980842590332,
      "eval_runtime": 43.7442,
      "eval_samples_per_second": 11.316,
      "eval_steps_per_second": 2.835,
      "step": 1000
    },
    {
      "epoch": 4.44,
      "grad_norm": 5.4564337730407715,
      "learning_rate": 1.1174089068825912e-06,
      "loss": 0.2312,
      "step": 1100
    },
    {
      "epoch": 4.85,
      "grad_norm": 2.0700230598449707,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.182,
      "step": 1200
    },
    {
      "epoch": 4.99,
      "step": 1235,
      "total_flos": 1.053348926143392e+18,
      "train_loss": 0.5674189640925481,
      "train_runtime": 1664.3033,
      "train_samples_per_second": 5.945,
      "train_steps_per_second": 0.742
    }
  ],
  "logging_steps": 100,
  "max_steps": 1235,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.053348926143392e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}