{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 2550,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.96,
      "learning_rate": 7.361999999999999e-05,
      "loss": 5.3118,
      "step": 100
    },
    {
      "epoch": 1.96,
      "eval_loss": 2.9093422889709473,
      "eval_runtime": 17.6486,
      "eval_samples_per_second": 19.322,
      "eval_steps_per_second": 2.436,
      "eval_wer": 0.9981549815498155,
      "step": 100
    },
    {
      "epoch": 3.92,
      "learning_rate": 7.062e-05,
      "loss": 2.2071,
      "step": 200
    },
    {
      "epoch": 3.92,
      "eval_loss": 1.1736845970153809,
      "eval_runtime": 17.7952,
      "eval_samples_per_second": 19.163,
      "eval_steps_per_second": 2.416,
      "eval_wer": 0.777859778597786,
      "step": 200
    },
    {
      "epoch": 5.88,
      "learning_rate": 6.761999999999999e-05,
      "loss": 1.6098,
      "step": 300
    },
    {
      "epoch": 5.88,
      "eval_loss": 0.9983930587768555,
      "eval_runtime": 17.6369,
      "eval_samples_per_second": 19.334,
      "eval_steps_per_second": 2.438,
      "eval_wer": 0.7014760147601476,
      "step": 300
    },
    {
      "epoch": 7.84,
      "learning_rate": 6.462e-05,
      "loss": 1.4333,
      "step": 400
    },
    {
      "epoch": 7.84,
      "eval_loss": 0.9799550771713257,
      "eval_runtime": 17.5884,
      "eval_samples_per_second": 19.388,
      "eval_steps_per_second": 2.445,
      "eval_wer": 0.670479704797048,
      "step": 400
    },
    {
      "epoch": 9.8,
      "learning_rate": 6.162e-05,
      "loss": 1.2859,
      "step": 500
    },
    {
      "epoch": 9.8,
      "eval_loss": 0.9582107067108154,
      "eval_runtime": 17.5894,
      "eval_samples_per_second": 19.387,
      "eval_steps_per_second": 2.445,
      "eval_wer": 0.6487084870848708,
      "step": 500
    },
    {
      "epoch": 11.76,
      "learning_rate": 5.861999999999999e-05,
      "loss": 1.2073,
      "step": 600
    },
    {
      "epoch": 11.76,
      "eval_loss": 0.8841160535812378,
      "eval_runtime": 17.5809,
      "eval_samples_per_second": 19.396,
      "eval_steps_per_second": 2.446,
      "eval_wer": 0.6077490774907749,
      "step": 600
    },
    {
      "epoch": 13.73,
      "learning_rate": 5.562e-05,
      "loss": 1.1417,
      "step": 700
    },
    {
      "epoch": 13.73,
      "eval_loss": 0.9118468165397644,
      "eval_runtime": 17.5423,
      "eval_samples_per_second": 19.439,
      "eval_steps_per_second": 2.451,
      "eval_wer": 0.6343173431734317,
      "step": 700
    },
    {
      "epoch": 15.69,
      "learning_rate": 5.2619999999999994e-05,
      "loss": 1.0988,
      "step": 800
    },
    {
      "epoch": 15.69,
      "eval_loss": 0.9216808080673218,
      "eval_runtime": 17.5542,
      "eval_samples_per_second": 19.426,
      "eval_steps_per_second": 2.45,
      "eval_wer": 0.6195571955719558,
      "step": 800
    },
    {
      "epoch": 17.65,
      "learning_rate": 4.9619999999999996e-05,
      "loss": 1.0279,
      "step": 900
    },
    {
      "epoch": 17.65,
      "eval_loss": 0.9165470600128174,
      "eval_runtime": 17.6001,
      "eval_samples_per_second": 19.375,
      "eval_steps_per_second": 2.443,
      "eval_wer": 0.5867158671586716,
      "step": 900
    },
    {
      "epoch": 19.61,
      "learning_rate": 4.662e-05,
      "loss": 0.9765,
      "step": 1000
    },
    {
      "epoch": 19.61,
      "eval_loss": 0.9305969476699829,
      "eval_runtime": 17.6088,
      "eval_samples_per_second": 19.365,
      "eval_steps_per_second": 2.442,
      "eval_wer": 0.5977859778597786,
      "step": 1000
    },
    {
      "epoch": 21.57,
      "learning_rate": 4.362e-05,
      "loss": 0.9161,
      "step": 1100
    },
    {
      "epoch": 21.57,
      "eval_loss": 0.9304590225219727,
      "eval_runtime": 17.4967,
      "eval_samples_per_second": 19.489,
      "eval_steps_per_second": 2.458,
      "eval_wer": 0.5767527675276752,
      "step": 1100
    },
    {
      "epoch": 23.53,
      "learning_rate": 4.0619999999999994e-05,
      "loss": 0.8395,
      "step": 1200
    },
    {
      "epoch": 23.53,
      "eval_loss": 0.982844889163971,
      "eval_runtime": 17.7044,
      "eval_samples_per_second": 19.261,
      "eval_steps_per_second": 2.429,
      "eval_wer": 0.5819188191881919,
      "step": 1200
    },
    {
      "epoch": 25.49,
      "learning_rate": 3.762e-05,
      "loss": 0.8306,
      "step": 1300
    },
    {
      "epoch": 25.49,
      "eval_loss": 0.9397062063217163,
      "eval_runtime": 17.6627,
      "eval_samples_per_second": 19.306,
      "eval_steps_per_second": 2.435,
      "eval_wer": 0.5760147601476014,
      "step": 1300
    },
    {
      "epoch": 27.45,
      "learning_rate": 3.462e-05,
      "loss": 0.7819,
      "step": 1400
    },
    {
      "epoch": 27.45,
      "eval_loss": 0.9543871283531189,
      "eval_runtime": 17.6517,
      "eval_samples_per_second": 19.318,
      "eval_steps_per_second": 2.436,
      "eval_wer": 0.574169741697417,
      "step": 1400
    },
    {
      "epoch": 29.41,
      "learning_rate": 3.161999999999999e-05,
      "loss": 0.7509,
      "step": 1500
    },
    {
      "epoch": 29.41,
      "eval_loss": 0.9278193712234497,
      "eval_runtime": 17.4199,
      "eval_samples_per_second": 19.575,
      "eval_steps_per_second": 2.468,
      "eval_wer": 0.5690036900369003,
      "step": 1500
    },
    {
      "epoch": 31.37,
      "learning_rate": 2.8619999999999997e-05,
      "loss": 0.7218,
      "step": 1600
    },
    {
      "epoch": 31.37,
      "eval_loss": 0.9002724289894104,
      "eval_runtime": 17.4813,
      "eval_samples_per_second": 19.507,
      "eval_steps_per_second": 2.46,
      "eval_wer": 0.5586715867158671,
      "step": 1600
    },
    {
      "epoch": 33.33,
      "learning_rate": 2.562e-05,
      "loss": 0.6725,
      "step": 1700
    },
    {
      "epoch": 33.33,
      "eval_loss": 0.9659368395805359,
      "eval_runtime": 17.6287,
      "eval_samples_per_second": 19.343,
      "eval_steps_per_second": 2.439,
      "eval_wer": 0.5553505535055351,
      "step": 1700
    },
    {
      "epoch": 35.29,
      "learning_rate": 2.2619999999999997e-05,
      "loss": 0.6287,
      "step": 1800
    },
    {
      "epoch": 35.29,
      "eval_loss": 0.9521807432174683,
      "eval_runtime": 17.6153,
      "eval_samples_per_second": 19.358,
      "eval_steps_per_second": 2.441,
      "eval_wer": 0.5560885608856089,
      "step": 1800
    },
    {
      "epoch": 37.25,
      "learning_rate": 1.962e-05,
      "loss": 0.6077,
      "step": 1900
    },
    {
      "epoch": 37.25,
      "eval_loss": 0.9153698086738586,
      "eval_runtime": 17.5781,
      "eval_samples_per_second": 19.399,
      "eval_steps_per_second": 2.446,
      "eval_wer": 0.5464944649446495,
      "step": 1900
    },
    {
      "epoch": 39.22,
      "learning_rate": 1.6619999999999997e-05,
      "loss": 0.5873,
      "step": 2000
    },
    {
      "epoch": 39.22,
      "eval_loss": 0.9330592751502991,
      "eval_runtime": 17.6281,
      "eval_samples_per_second": 19.344,
      "eval_steps_per_second": 2.439,
      "eval_wer": 0.5468634686346864,
      "step": 2000
    },
    {
      "epoch": 41.18,
      "learning_rate": 1.362e-05,
      "loss": 0.5621,
      "step": 2100
    },
    {
      "epoch": 41.18,
      "eval_loss": 0.9334675669670105,
      "eval_runtime": 17.5009,
      "eval_samples_per_second": 19.485,
      "eval_steps_per_second": 2.457,
      "eval_wer": 0.5490774907749078,
      "step": 2100
    },
    {
      "epoch": 43.14,
      "learning_rate": 1.062e-05,
      "loss": 0.5168,
      "step": 2200
    },
    {
      "epoch": 43.14,
      "eval_loss": 0.9632032513618469,
      "eval_runtime": 17.6433,
      "eval_samples_per_second": 19.327,
      "eval_steps_per_second": 2.437,
      "eval_wer": 0.5457564575645757,
      "step": 2200
    },
    {
      "epoch": 45.1,
      "learning_rate": 7.619999999999999e-06,
      "loss": 0.5114,
      "step": 2300
    },
    {
      "epoch": 45.1,
      "eval_loss": 0.9349461793899536,
      "eval_runtime": 17.5705,
      "eval_samples_per_second": 19.407,
      "eval_steps_per_second": 2.447,
      "eval_wer": 0.5387453874538746,
      "step": 2300
    },
    {
      "epoch": 47.06,
      "learning_rate": 4.62e-06,
      "loss": 0.4986,
      "step": 2400
    },
    {
      "epoch": 47.06,
      "eval_loss": 0.9364362955093384,
      "eval_runtime": 17.6211,
      "eval_samples_per_second": 19.352,
      "eval_steps_per_second": 2.44,
      "eval_wer": 0.5380073800738008,
      "step": 2400
    },
    {
      "epoch": 49.02,
      "learning_rate": 1.62e-06,
      "loss": 0.4761,
      "step": 2500
    },
    {
      "epoch": 49.02,
      "eval_loss": 0.9584243297576904,
      "eval_runtime": 17.5152,
      "eval_samples_per_second": 19.469,
      "eval_steps_per_second": 2.455,
      "eval_wer": 0.5391143911439115,
      "step": 2500
    },
    {
      "epoch": 50.0,
      "step": 2550,
      "total_flos": 1.5012316680494998e+19,
      "train_loss": 1.0758988070020488,
      "train_runtime": 4106.4423,
      "train_samples_per_second": 9.863,
      "train_steps_per_second": 0.621
    }
  ],
  "max_steps": 2550,
  "num_train_epochs": 50,
  "total_flos": 1.5012316680494998e+19,
  "trial_name": null,
  "trial_params": null
}