{
  "best_metric": 1.24324631690979,
  "best_model_checkpoint": "data/Llama-31-8B_task-1_120-samples_config-1/checkpoint-55",
  "epoch": 12.0,
  "eval_steps": 500,
  "global_step": 132,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 1.947190523147583,
      "learning_rate": 1.818181818181818e-06,
      "loss": 2.2569,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 1.950323462486267,
      "learning_rate": 3.636363636363636e-06,
      "loss": 2.3228,
      "step": 2
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 1.7120546102523804,
      "learning_rate": 7.272727272727272e-06,
      "loss": 2.076,
      "step": 4
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 1.5879558324813843,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 2.0525,
      "step": 6
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 1.8351306915283203,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 2.1059,
      "step": 8
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 1.7258864641189575,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 1.8587,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.8961697816848755,
      "eval_runtime": 9.6373,
      "eval_samples_per_second": 2.49,
      "eval_steps_per_second": 2.49,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.5399166345596313,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 1.7527,
      "step": 12
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 1.6511270999908447,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 1.8337,
      "step": 14
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.104617714881897,
      "learning_rate": 2.909090909090909e-05,
      "loss": 1.7403,
      "step": 16
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 0.8533787131309509,
      "learning_rate": 3.272727272727273e-05,
      "loss": 1.5971,
      "step": 18
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.8006373047828674,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 1.503,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7819918394088745,
      "learning_rate": 4e-05,
      "loss": 1.5582,
      "step": 22
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.5353227853775024,
      "eval_runtime": 9.6335,
      "eval_samples_per_second": 2.491,
      "eval_steps_per_second": 2.491,
      "step": 22
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 0.713280200958252,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 1.5487,
      "step": 24
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 0.6750282645225525,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 1.4773,
      "step": 26
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.7663286924362183,
      "learning_rate": 5.090909090909091e-05,
      "loss": 1.3698,
      "step": 28
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.6964442133903503,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 1.2677,
      "step": 30
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.7686213850975037,
      "learning_rate": 5.818181818181818e-05,
      "loss": 1.3116,
      "step": 32
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.3440297842025757,
      "eval_runtime": 9.6307,
      "eval_samples_per_second": 2.492,
      "eval_steps_per_second": 2.492,
      "step": 33
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 0.7271563410758972,
      "learning_rate": 6.181818181818182e-05,
      "loss": 1.3272,
      "step": 34
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.7131862640380859,
      "learning_rate": 6.545454545454546e-05,
      "loss": 1.2423,
      "step": 36
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 0.8657882809638977,
      "learning_rate": 6.90909090909091e-05,
      "loss": 1.2197,
      "step": 38
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.8501375317573547,
      "learning_rate": 7.272727272727273e-05,
      "loss": 1.1791,
      "step": 40
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 0.8458476662635803,
      "learning_rate": 7.636363636363637e-05,
      "loss": 1.1082,
      "step": 42
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.8599070906639099,
      "learning_rate": 8e-05,
      "loss": 1.1103,
      "step": 44
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.248623251914978,
      "eval_runtime": 9.6225,
      "eval_samples_per_second": 2.494,
      "eval_steps_per_second": 2.494,
      "step": 44
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 0.9287438988685608,
      "learning_rate": 8.363636363636364e-05,
      "loss": 0.991,
      "step": 46
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 1.0513927936553955,
      "learning_rate": 8.727272727272727e-05,
      "loss": 0.9697,
      "step": 48
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 1.1929577589035034,
      "learning_rate": 9.090909090909092e-05,
      "loss": 1.0229,
      "step": 50
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 1.1892277002334595,
      "learning_rate": 9.454545454545455e-05,
      "loss": 0.9632,
      "step": 52
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 1.2755712270736694,
      "learning_rate": 9.818181818181818e-05,
      "loss": 0.9015,
      "step": 54
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.24324631690979,
      "eval_runtime": 9.6237,
      "eval_samples_per_second": 2.494,
      "eval_steps_per_second": 2.494,
      "step": 55
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 1.26650071144104,
      "learning_rate": 9.999899300364532e-05,
      "loss": 0.871,
      "step": 56
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 1.2812609672546387,
      "learning_rate": 9.99909372761763e-05,
      "loss": 0.7046,
      "step": 58
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 1.5597699880599976,
      "learning_rate": 9.997482711915927e-05,
      "loss": 0.6681,
      "step": 60
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 1.8858792781829834,
      "learning_rate": 9.99506651282272e-05,
      "loss": 0.7369,
      "step": 62
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 1.7140628099441528,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.6761,
      "step": 64
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.500331163406372,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.6339,
      "step": 66
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.3448349237442017,
      "eval_runtime": 9.633,
      "eval_samples_per_second": 2.491,
      "eval_steps_per_second": 2.491,
      "step": 66
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 1.3713549375534058,
      "learning_rate": 9.982991356370404e-05,
      "loss": 0.527,
      "step": 68
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 1.5518769025802612,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.4046,
      "step": 70
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 2.7726385593414307,
      "learning_rate": 9.970925928158274e-05,
      "loss": 0.3872,
      "step": 72
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 3.0638201236724854,
      "learning_rate": 9.963691338830044e-05,
      "loss": 0.4758,
      "step": 74
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 1.8400838375091553,
      "learning_rate": 9.955657010501806e-05,
      "loss": 0.2953,
      "step": 76
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.664857268333435,
      "eval_runtime": 9.6249,
      "eval_samples_per_second": 2.494,
      "eval_steps_per_second": 2.494,
      "step": 77
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 1.4201287031173706,
      "learning_rate": 9.946824237646824e-05,
      "loss": 0.2398,
      "step": 78
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 1.569122314453125,
      "learning_rate": 9.937194443381972e-05,
      "loss": 0.2232,
      "step": 80
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 2.7534537315368652,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.1809,
      "step": 82
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 3.2907912731170654,
      "learning_rate": 9.915550124911866e-05,
      "loss": 0.2009,
      "step": 84
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 2.7623965740203857,
      "learning_rate": 9.903539087991462e-05,
      "loss": 0.2162,
      "step": 86
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.723360776901245,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.2611,
      "step": 88
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.9194618463516235,
      "eval_runtime": 9.6254,
      "eval_samples_per_second": 2.493,
      "eval_steps_per_second": 2.493,
      "step": 88
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 1.1859649419784546,
      "learning_rate": 9.877148934427037e-05,
      "loss": 0.13,
      "step": 90
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 1.631119966506958,
      "learning_rate": 9.862774069706346e-05,
      "loss": 0.0969,
      "step": 92
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 1.6909385919570923,
      "learning_rate": 9.847615725553456e-05,
      "loss": 0.0749,
      "step": 94
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 2.5971972942352295,
      "learning_rate": 9.831676344247342e-05,
      "loss": 0.0947,
      "step": 96
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 2.3750057220458984,
      "learning_rate": 9.814958493905963e-05,
      "loss": 0.107,
      "step": 98
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.4669106006622314,
      "eval_runtime": 9.6325,
      "eval_samples_per_second": 2.492,
      "eval_steps_per_second": 2.492,
      "step": 99
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 1.5737000703811646,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.0641,
      "step": 100
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 1.2060961723327637,
      "learning_rate": 9.779198285281325e-05,
      "loss": 0.0515,
      "step": 102
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 0.8381581902503967,
      "learning_rate": 9.760161688604008e-05,
      "loss": 0.04,
      "step": 104
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 2.207648754119873,
      "learning_rate": 9.740358145174998e-05,
      "loss": 0.0481,
      "step": 106
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 2.044506311416626,
      "learning_rate": 9.719790845697533e-05,
      "loss": 0.0573,
      "step": 108
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.117966413497925,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0592,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.5538697242736816,
      "eval_runtime": 9.6217,
      "eval_samples_per_second": 2.494,
      "eval_steps_per_second": 2.494,
      "step": 110
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 1.4448171854019165,
      "learning_rate": 9.676378356149734e-05,
      "loss": 0.021,
      "step": 112
    },
    {
      "epoch": 10.363636363636363,
      "grad_norm": 1.2305335998535156,
      "learning_rate": 9.653540160603956e-05,
      "loss": 0.0286,
      "step": 114
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 1.0550709962844849,
      "learning_rate": 9.629952196931901e-05,
      "loss": 0.0274,
      "step": 116
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 1.6463170051574707,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.0486,
      "step": 118
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 1.7919278144836426,
      "learning_rate": 9.580542287160348e-05,
      "loss": 0.06,
      "step": 120
    },
    {
      "epoch": 11.0,
      "eval_loss": 2.47693133354187,
      "eval_runtime": 9.6239,
      "eval_samples_per_second": 2.494,
      "eval_steps_per_second": 2.494,
      "step": 121
    },
    {
      "epoch": 11.090909090909092,
      "grad_norm": 0.7210983037948608,
      "learning_rate": 9.554728301876526e-05,
      "loss": 0.0226,
      "step": 122
    },
    {
      "epoch": 11.272727272727273,
      "grad_norm": 0.9753442406654358,
      "learning_rate": 9.528180468815155e-05,
      "loss": 0.0168,
      "step": 124
    },
    {
      "epoch": 11.454545454545455,
      "grad_norm": 1.1973837614059448,
      "learning_rate": 9.50090306530454e-05,
      "loss": 0.0248,
      "step": 126
    },
    {
      "epoch": 11.636363636363637,
      "grad_norm": 0.9973687529563904,
      "learning_rate": 9.472900486219769e-05,
      "loss": 0.0107,
      "step": 128
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 1.4094384908676147,
      "learning_rate": 9.444177243274618e-05,
      "loss": 0.0223,
      "step": 130
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.581376552581787,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0548,
      "step": 132
    },
    {
      "epoch": 12.0,
      "eval_loss": 2.9299166202545166,
      "eval_runtime": 9.6267,
      "eval_samples_per_second": 2.493,
      "eval_steps_per_second": 2.493,
      "step": 132
    },
    {
      "epoch": 12.0,
      "step": 132,
      "total_flos": 1.6209403933884416e+16,
      "train_loss": 0.7309455393926438,
      "train_runtime": 1236.3069,
      "train_samples_per_second": 3.559,
      "train_steps_per_second": 0.445
    }
  ],
  "logging_steps": 2,
  "max_steps": 550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6209403933884416e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}