{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.848178867652362,
  "eval_steps": 500,
  "global_step": 20500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 1.970188724606323e-05,
      "loss": 2.5984,
      "step": 500
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9401370357014065e-05,
      "loss": 1.5594,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.91008534679649e-05,
      "loss": 1.4491,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8800336578915736e-05,
      "loss": 1.3519,
      "step": 2000
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.849981968986657e-05,
      "loss": 1.2969,
      "step": 2500
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.8199302800817406e-05,
      "loss": 1.257,
      "step": 3000
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.7898785911768244e-05,
      "loss": 1.2217,
      "step": 3500
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.759887005649718e-05,
      "loss": 1.2111,
      "step": 4000
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.729835316744801e-05,
      "loss": 1.1902,
      "step": 4500
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.6997836278398846e-05,
      "loss": 1.1554,
      "step": 5000
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.669731938934968e-05,
      "loss": 1.1579,
      "step": 5500
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.6396802500300516e-05,
      "loss": 1.1231,
      "step": 6000
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.6096285611251354e-05,
      "loss": 1.1586,
      "step": 6500
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.579636975598029e-05,
      "loss": 1.1038,
      "step": 7000
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.5495852866931124e-05,
      "loss": 1.0939,
      "step": 7500
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.5195335977881957e-05,
      "loss": 1.0662,
      "step": 8000
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.4895420122610893e-05,
      "loss": 1.0853,
      "step": 8500
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.4594903233561729e-05,
      "loss": 1.0293,
      "step": 9000
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.4294386344512564e-05,
      "loss": 1.0535,
      "step": 9500
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.3993869455463399e-05,
      "loss": 0.9944,
      "step": 10000
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3693352566414232e-05,
      "loss": 1.0206,
      "step": 10500
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.3392835677365067e-05,
      "loss": 1.0107,
      "step": 11000
    },
    {
      "epoch": 1.04,
      "learning_rate": 1.3092318788315906e-05,
      "loss": 0.8016,
      "step": 11500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.2791801899266741e-05,
      "loss": 0.7445,
      "step": 12000
    },
    {
      "epoch": 1.13,
      "learning_rate": 1.2491285010217575e-05,
      "loss": 0.7645,
      "step": 12500
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.219076812116841e-05,
      "loss": 0.7633,
      "step": 13000
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.1890852265897344e-05,
      "loss": 0.7529,
      "step": 13500
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.1590335376848181e-05,
      "loss": 0.7402,
      "step": 14000
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.1289818487799016e-05,
      "loss": 0.7461,
      "step": 14500
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.0989301598749851e-05,
      "loss": 0.7572,
      "step": 15000
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.0689385743478784e-05,
      "loss": 0.7287,
      "step": 15500
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.0388868854429619e-05,
      "loss": 0.7596,
      "step": 16000
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.0088351965380456e-05,
      "loss": 0.7507,
      "step": 16500
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.787835076331291e-06,
      "loss": 0.7326,
      "step": 17000
    },
    {
      "epoch": 1.58,
      "learning_rate": 9.487919221060224e-06,
      "loss": 0.7588,
      "step": 17500
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.187402332011059e-06,
      "loss": 0.7407,
      "step": 18000
    },
    {
      "epoch": 1.67,
      "learning_rate": 8.886885442961896e-06,
      "loss": 0.7192,
      "step": 18500
    },
    {
      "epoch": 1.71,
      "learning_rate": 8.58636855391273e-06,
      "loss": 0.7622,
      "step": 19000
    },
    {
      "epoch": 1.76,
      "learning_rate": 8.285851664863566e-06,
      "loss": 0.6851,
      "step": 19500
    },
    {
      "epoch": 1.8,
      "learning_rate": 7.985334775814401e-06,
      "loss": 0.7364,
      "step": 20000
    },
    {
      "epoch": 1.85,
      "learning_rate": 7.684817886765236e-06,
      "loss": 0.7051,
      "step": 20500
    }
  ],
  "logging_steps": 500,
  "max_steps": 33276,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.2138129270555136e+16,
  "trial_name": null,
  "trial_params": null
}