{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.7079303675048356,
  "eval_steps": 50,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09671179883945841,
      "grad_norm": 0.035103704780340195,
      "learning_rate": 0.00016025641025641028,
      "loss": 1.0483,
      "step": 50
    },
    {
      "epoch": 0.09671179883945841,
      "eval_loss": 0.9260319471359253,
      "eval_runtime": 60.9012,
      "eval_samples_per_second": 1.642,
      "eval_steps_per_second": 0.213,
      "step": 50
    },
    {
      "epoch": 0.19342359767891681,
      "grad_norm": 0.030684856697916985,
      "learning_rate": 0.00032051282051282057,
      "loss": 0.8577,
      "step": 100
    },
    {
      "epoch": 0.19342359767891681,
      "eval_loss": 0.820220410823822,
      "eval_runtime": 56.5241,
      "eval_samples_per_second": 1.769,
      "eval_steps_per_second": 0.23,
      "step": 100
    },
    {
      "epoch": 0.2901353965183752,
      "grad_norm": 0.02802114002406597,
      "learning_rate": 0.0004807692307692308,
      "loss": 0.7996,
      "step": 150
    },
    {
      "epoch": 0.2901353965183752,
      "eval_loss": 0.7894760370254517,
      "eval_runtime": 56.6018,
      "eval_samples_per_second": 1.767,
      "eval_steps_per_second": 0.23,
      "step": 150
    },
    {
      "epoch": 0.38684719535783363,
      "grad_norm": 0.03189089894294739,
      "learning_rate": 0.0004842293906810036,
      "loss": 0.7802,
      "step": 200
    },
    {
      "epoch": 0.38684719535783363,
      "eval_loss": 0.7783958315849304,
      "eval_runtime": 56.5799,
      "eval_samples_per_second": 1.767,
      "eval_steps_per_second": 0.23,
      "step": 200
    },
    {
      "epoch": 0.4835589941972921,
      "grad_norm": 0.03580320626497269,
      "learning_rate": 0.00046630824372759856,
      "loss": 0.7671,
      "step": 250
    },
    {
      "epoch": 0.4835589941972921,
      "eval_loss": 0.7721498012542725,
      "eval_runtime": 57.9466,
      "eval_samples_per_second": 1.726,
      "eval_steps_per_second": 0.224,
      "step": 250
    },
    {
      "epoch": 0.5802707930367504,
      "grad_norm": 0.034203894436359406,
      "learning_rate": 0.00044838709677419355,
      "loss": 0.761,
      "step": 300
    },
    {
      "epoch": 0.5802707930367504,
      "eval_loss": 0.7687731385231018,
      "eval_runtime": 60.6867,
      "eval_samples_per_second": 1.648,
      "eval_steps_per_second": 0.214,
      "step": 300
    },
    {
      "epoch": 0.6769825918762089,
      "grad_norm": 0.03307868540287018,
      "learning_rate": 0.00043046594982078853,
      "loss": 0.7587,
      "step": 350
    },
    {
      "epoch": 0.6769825918762089,
      "eval_loss": 0.766252338886261,
      "eval_runtime": 56.5991,
      "eval_samples_per_second": 1.767,
      "eval_steps_per_second": 0.23,
      "step": 350
    },
    {
      "epoch": 0.7736943907156673,
      "grad_norm": 0.03772694244980812,
      "learning_rate": 0.0004125448028673835,
      "loss": 0.7529,
      "step": 400
    },
    {
      "epoch": 0.7736943907156673,
      "eval_loss": 0.7637045383453369,
      "eval_runtime": 56.5561,
      "eval_samples_per_second": 1.768,
      "eval_steps_per_second": 0.23,
      "step": 400
    },
    {
      "epoch": 0.8704061895551257,
      "grad_norm": 0.04153395816683769,
      "learning_rate": 0.00039462365591397855,
      "loss": 0.7562,
      "step": 450
    },
    {
      "epoch": 0.8704061895551257,
      "eval_loss": 0.7616338133811951,
      "eval_runtime": 56.6718,
      "eval_samples_per_second": 1.765,
      "eval_steps_per_second": 0.229,
      "step": 450
    },
    {
      "epoch": 0.9671179883945842,
      "grad_norm": 0.03528429567813873,
      "learning_rate": 0.00037670250896057353,
      "loss": 0.7507,
      "step": 500
    },
    {
      "epoch": 0.9671179883945842,
      "eval_loss": 0.7601596117019653,
      "eval_runtime": 56.9725,
      "eval_samples_per_second": 1.755,
      "eval_steps_per_second": 0.228,
      "step": 500
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": 0.03503500670194626,
      "learning_rate": 0.0003587813620071685,
      "loss": 0.7274,
      "step": 550
    },
    {
      "epoch": 1.0638297872340425,
      "eval_loss": 0.7588610053062439,
      "eval_runtime": 60.6429,
      "eval_samples_per_second": 1.649,
      "eval_steps_per_second": 0.214,
      "step": 550
    },
    {
      "epoch": 1.1605415860735009,
      "grad_norm": 0.0375722236931324,
      "learning_rate": 0.0003408602150537635,
      "loss": 0.7422,
      "step": 600
    },
    {
      "epoch": 1.1605415860735009,
      "eval_loss": 0.7574188113212585,
      "eval_runtime": 56.7788,
      "eval_samples_per_second": 1.761,
      "eval_steps_per_second": 0.229,
      "step": 600
    },
    {
      "epoch": 1.2572533849129595,
      "grad_norm": 0.04027143493294716,
      "learning_rate": 0.0003229390681003584,
      "loss": 0.735,
      "step": 650
    },
    {
      "epoch": 1.2572533849129595,
      "eval_loss": 0.7570986747741699,
      "eval_runtime": 56.5753,
      "eval_samples_per_second": 1.768,
      "eval_steps_per_second": 0.23,
      "step": 650
    },
    {
      "epoch": 1.3539651837524178,
      "grad_norm": 0.04008708521723747,
      "learning_rate": 0.0003050179211469534,
      "loss": 0.7367,
      "step": 700
    },
    {
      "epoch": 1.3539651837524178,
      "eval_loss": 0.7555394172668457,
      "eval_runtime": 56.5444,
      "eval_samples_per_second": 1.769,
      "eval_steps_per_second": 0.23,
      "step": 700
    },
    {
      "epoch": 1.4506769825918762,
      "grad_norm": 0.037061236798763275,
      "learning_rate": 0.0002870967741935484,
      "loss": 0.7471,
      "step": 750
    },
    {
      "epoch": 1.4506769825918762,
      "eval_loss": 0.7549133896827698,
      "eval_runtime": 56.529,
      "eval_samples_per_second": 1.769,
      "eval_steps_per_second": 0.23,
      "step": 750
    },
    {
      "epoch": 1.5473887814313345,
      "grad_norm": 0.04119856655597687,
      "learning_rate": 0.00026917562724014337,
      "loss": 0.7404,
      "step": 800
    },
    {
      "epoch": 1.5473887814313345,
      "eval_loss": 0.7541146278381348,
      "eval_runtime": 60.5765,
      "eval_samples_per_second": 1.651,
      "eval_steps_per_second": 0.215,
      "step": 800
    },
    {
      "epoch": 1.644100580270793,
      "grad_norm": 0.041316960006952286,
      "learning_rate": 0.00025125448028673835,
      "loss": 0.742,
      "step": 850
    },
    {
      "epoch": 1.644100580270793,
      "eval_loss": 0.753339409828186,
      "eval_runtime": 56.7119,
      "eval_samples_per_second": 1.763,
      "eval_steps_per_second": 0.229,
      "step": 850
    },
    {
      "epoch": 1.7408123791102514,
      "grad_norm": 0.04064641892910004,
      "learning_rate": 0.00023333333333333333,
      "loss": 0.7385,
      "step": 900
    },
    {
      "epoch": 1.7408123791102514,
      "eval_loss": 0.7530195116996765,
      "eval_runtime": 56.5303,
      "eval_samples_per_second": 1.769,
      "eval_steps_per_second": 0.23,
      "step": 900
    },
    {
      "epoch": 1.8375241779497098,
      "grad_norm": 0.03964291140437126,
      "learning_rate": 0.0002154121863799283,
      "loss": 0.7352,
      "step": 950
    },
    {
      "epoch": 1.8375241779497098,
      "eval_loss": 0.7525290846824646,
      "eval_runtime": 56.698,
      "eval_samples_per_second": 1.764,
      "eval_steps_per_second": 0.229,
      "step": 950
    },
    {
      "epoch": 1.9342359767891684,
      "grad_norm": 0.046565938740968704,
      "learning_rate": 0.0001974910394265233,
      "loss": 0.7323,
      "step": 1000
    },
    {
      "epoch": 1.9342359767891684,
      "eval_loss": 0.7516247630119324,
      "eval_runtime": 56.5619,
      "eval_samples_per_second": 1.768,
      "eval_steps_per_second": 0.23,
      "step": 1000
    },
    {
      "epoch": 2.0309477756286265,
      "grad_norm": 0.04249171167612076,
      "learning_rate": 0.00017956989247311828,
      "loss": 0.7328,
      "step": 1050
    },
    {
      "epoch": 2.0309477756286265,
      "eval_loss": 0.7514955997467041,
      "eval_runtime": 60.5727,
      "eval_samples_per_second": 1.651,
      "eval_steps_per_second": 0.215,
      "step": 1050
    },
    {
      "epoch": 2.127659574468085,
      "grad_norm": 0.04245985299348831,
      "learning_rate": 0.00016164874551971326,
      "loss": 0.7264,
      "step": 1100
    },
    {
      "epoch": 2.127659574468085,
      "eval_loss": 0.7509506344795227,
      "eval_runtime": 56.8661,
      "eval_samples_per_second": 1.759,
      "eval_steps_per_second": 0.229,
      "step": 1100
    },
    {
      "epoch": 2.2243713733075436,
      "grad_norm": 0.04050629585981369,
      "learning_rate": 0.00014372759856630824,
      "loss": 0.704,
      "step": 1150
    },
    {
      "epoch": 2.2243713733075436,
      "eval_loss": 0.7505396008491516,
      "eval_runtime": 56.5726,
      "eval_samples_per_second": 1.768,
      "eval_steps_per_second": 0.23,
      "step": 1150
    },
    {
      "epoch": 2.3210831721470018,
      "grad_norm": 0.043631162494421005,
      "learning_rate": 0.00012580645161290322,
      "loss": 0.7242,
      "step": 1200
    },
    {
      "epoch": 2.3210831721470018,
      "eval_loss": 0.751037061214447,
      "eval_runtime": 56.6798,
      "eval_samples_per_second": 1.764,
      "eval_steps_per_second": 0.229,
      "step": 1200
    },
    {
      "epoch": 2.4177949709864603,
      "grad_norm": 0.04337254539132118,
      "learning_rate": 0.0001078853046594982,
      "loss": 0.7203,
      "step": 1250
    },
    {
      "epoch": 2.4177949709864603,
      "eval_loss": 0.7501779794692993,
      "eval_runtime": 56.6502,
      "eval_samples_per_second": 1.765,
      "eval_steps_per_second": 0.229,
      "step": 1250
    },
    {
      "epoch": 2.514506769825919,
      "grad_norm": 0.04406700283288956,
      "learning_rate": 8.996415770609319e-05,
      "loss": 0.7285,
      "step": 1300
    },
    {
      "epoch": 2.514506769825919,
      "eval_loss": 0.7498863339424133,
      "eval_runtime": 60.6054,
      "eval_samples_per_second": 1.65,
      "eval_steps_per_second": 0.215,
      "step": 1300
    },
    {
      "epoch": 2.611218568665377,
      "grad_norm": 0.04529803246259689,
      "learning_rate": 7.204301075268817e-05,
      "loss": 0.7192,
      "step": 1350
    },
    {
      "epoch": 2.611218568665377,
      "eval_loss": 0.7502144575119019,
      "eval_runtime": 57.5261,
      "eval_samples_per_second": 1.738,
      "eval_steps_per_second": 0.226,
      "step": 1350
    }
  ],
  "logging_steps": 50,
  "max_steps": 1551,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7515113721473024e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}