Llama2_Instruction_Finetuning_Experiments/llama2_7b_SGD_Cosine/checkpoint-375/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9273570324574961,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.9999999999999997e-06,
      "loss": 1.8153,
      "step": 1
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.999999999999999e-06,
      "loss": 1.7198,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.999999999999999e-06,
      "loss": 1.8135,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.1999999999999999e-05,
      "loss": 1.91,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 1.8073,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.7999999999999997e-05,
      "loss": 1.848,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.1e-05,
      "loss": 1.8294,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3999999999999997e-05,
      "loss": 1.9358,
      "step": 8
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.6999999999999996e-05,
      "loss": 1.98,
      "step": 9
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 1.8625,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.2999999999999996e-05,
      "loss": 1.8559,
      "step": 11
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.5999999999999994e-05,
      "loss": 1.7621,
      "step": 12
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.9e-05,
      "loss": 1.7816,
      "step": 13
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.2e-05,
      "loss": 1.8272,
      "step": 14
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 1.8546,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.7999999999999994e-05,
      "loss": 1.6863,
      "step": 16
    },
    {
      "epoch": 0.04,
      "learning_rate": 5.1e-05,
      "loss": 1.6002,
      "step": 17
    },
    {
      "epoch": 0.04,
      "learning_rate": 5.399999999999999e-05,
      "loss": 1.7753,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 1.8031,
      "step": 19
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.7884,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 6.299999999999999e-05,
      "loss": 1.6288,
      "step": 21
    },
    {
      "epoch": 0.05,
      "learning_rate": 6.599999999999999e-05,
      "loss": 1.672,
      "step": 22
    },
    {
      "epoch": 0.06,
      "learning_rate": 6.9e-05,
      "loss": 1.6625,
      "step": 23
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.199999999999999e-05,
      "loss": 1.6373,
      "step": 24
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.5e-05,
      "loss": 1.5654,
      "step": 25
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.8e-05,
      "loss": 1.5128,
      "step": 26
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.1e-05,
      "loss": 1.5769,
      "step": 27
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.4e-05,
      "loss": 1.4986,
      "step": 28
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.699999999999999e-05,
      "loss": 1.514,
      "step": 29
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.999999999999999e-05,
      "loss": 1.4492,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.3e-05,
      "loss": 1.437,
      "step": 31
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.599999999999999e-05,
      "loss": 1.4183,
      "step": 32
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.9e-05,
      "loss": 1.3496,
      "step": 33
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.000102,
      "loss": 1.3538,
      "step": 34
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00010499999999999999,
      "loss": 1.2837,
      "step": 35
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00010799999999999998,
      "loss": 1.2471,
      "step": 36
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00011099999999999999,
      "loss": 1.2154,
      "step": 37
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00011399999999999999,
      "loss": 1.1819,
      "step": 38
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.000117,
      "loss": 1.16,
      "step": 39
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.1309,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00012299999999999998,
      "loss": 1.1511,
      "step": 41
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00012599999999999997,
      "loss": 1.0796,
      "step": 42
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000129,
      "loss": 1.0747,
      "step": 43
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013199999999999998,
      "loss": 1.0301,
      "step": 44
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000135,
      "loss": 1.0205,
      "step": 45
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000138,
      "loss": 1.05,
      "step": 46
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00014099999999999998,
      "loss": 1.0128,
      "step": 47
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00014399999999999998,
      "loss": 1.0066,
      "step": 48
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000147,
      "loss": 0.9924,
      "step": 49
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00015,
      "loss": 1.0251,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00015299999999999998,
      "loss": 0.9605,
      "step": 51
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000156,
      "loss": 0.9755,
      "step": 52
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000159,
      "loss": 0.9642,
      "step": 53
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000162,
      "loss": 0.9631,
      "step": 54
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000165,
      "loss": 0.9736,
      "step": 55
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000168,
      "loss": 0.9673,
      "step": 56
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017099999999999998,
      "loss": 1.0058,
      "step": 57
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017399999999999997,
      "loss": 0.9245,
      "step": 58
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017699999999999997,
      "loss": 0.8959,
      "step": 59
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.8951,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00018299999999999998,
      "loss": 0.9796,
      "step": 61
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000186,
      "loss": 0.9347,
      "step": 62
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018899999999999999,
      "loss": 0.8796,
      "step": 63
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019199999999999998,
      "loss": 0.8916,
      "step": 64
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000195,
      "loss": 0.8951,
      "step": 65
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000198,
      "loss": 0.8821,
      "step": 66
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000201,
      "loss": 0.8916,
      "step": 67
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000204,
      "loss": 0.94,
      "step": 68
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00020699999999999996,
      "loss": 0.8569,
      "step": 69
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.8929,
      "step": 70
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021299999999999997,
      "loss": 0.8895,
      "step": 71
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021599999999999996,
      "loss": 0.8258,
      "step": 72
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021899999999999998,
      "loss": 0.885,
      "step": 73
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00022199999999999998,
      "loss": 0.8788,
      "step": 74
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000225,
      "loss": 0.8865,
      "step": 75
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00022799999999999999,
      "loss": 0.8757,
      "step": 76
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00023099999999999998,
      "loss": 0.8821,
      "step": 77
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000234,
      "loss": 0.8786,
      "step": 78
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.000237,
      "loss": 0.8665,
      "step": 79
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.8617,
      "step": 80
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.000243,
      "loss": 0.8288,
      "step": 81
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00024599999999999996,
      "loss": 0.8719,
      "step": 82
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.000249,
      "loss": 0.872,
      "step": 83
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025199999999999995,
      "loss": 0.8618,
      "step": 84
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025499999999999996,
      "loss": 0.8502,
      "step": 85
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.000258,
      "loss": 0.8855,
      "step": 86
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.000261,
      "loss": 0.8593,
      "step": 87
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00026399999999999997,
      "loss": 0.878,
      "step": 88
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.000267,
      "loss": 0.8736,
      "step": 89
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00027,
      "loss": 0.8757,
      "step": 90
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00027299999999999997,
      "loss": 0.848,
      "step": 91
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000276,
      "loss": 0.87,
      "step": 92
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000279,
      "loss": 0.8922,
      "step": 93
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00028199999999999997,
      "loss": 0.8581,
      "step": 94
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000285,
      "loss": 0.8453,
      "step": 95
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00028799999999999995,
      "loss": 0.8585,
      "step": 96
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00029099999999999997,
      "loss": 0.8634,
      "step": 97
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.000294,
      "loss": 0.8409,
      "step": 98
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00029699999999999996,
      "loss": 0.8882,
      "step": 99
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0003,
      "loss": 0.8696,
      "step": 100
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029999199041570257,
      "loss": 0.8779,
      "step": 101
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029996796251818966,
      "loss": 0.8137,
      "step": 102
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029992791887350736,
      "loss": 0.8419,
      "step": 103
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002998718637580951,
      "loss": 0.8666,
      "step": 104
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002997998031583285,
      "loss": 0.8451,
      "step": 105
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002997117447698802,
      "loss": 0.875,
      "step": 106
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00029960769799689793,
      "loss": 0.8658,
      "step": 107
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.8738,
      "step": 108
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002993516854500905,
      "loss": 0.8324,
      "step": 109
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029919974701698635,
      "loss": 0.8494,
      "step": 110
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029903187487787046,
      "loss": 0.8624,
      "step": 111
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002988480869605567,
      "loss": 0.8772,
      "step": 112
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002986484028925761,
      "loss": 0.8527,
      "step": 113
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002984328439990804,
      "loss": 0.8234,
      "step": 114
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002982014333005645,
      "loss": 0.7951,
      "step": 115
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.8506,
      "step": 116
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029769115703223763,
      "loss": 0.8084,
      "step": 117
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002974123459571039,
      "loss": 0.8541,
      "step": 118
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029711779206048454,
      "loss": 0.8425,
      "step": 119
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029680752679910315,
      "loss": 0.8619,
      "step": 120
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029648158330756986,
      "loss": 0.8502,
      "step": 121
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0002961399963948431,
      "loss": 0.8482,
      "step": 122
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0002957828025405117,
      "loss": 0.8647,
      "step": 123
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.8427,
      "step": 124
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00029502174825499146,
      "loss": 0.8723,
      "step": 125
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.000294617969100182,
      "loss": 0.8716,
      "step": 126
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00029419874554784695,
      "loss": 0.8385,
      "step": 127
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0002937641223687379,
      "loss": 0.841,
      "step": 128
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029331414597820145,
      "loss": 0.838,
      "step": 129
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029284886443122214,
      "loss": 0.8321,
      "step": 130
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029236832741729016,
      "loss": 0.9036,
      "step": 131
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.8804,
      "step": 132
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002913616938870455,
      "loss": 0.7992,
      "step": 133
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002908357048736144,
      "loss": 0.8204,
      "step": 134
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00029029467538751303,
      "loss": 0.8584,
      "step": 135
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00028973866320769183,
      "loss": 0.8478,
      "step": 136
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00028916772771316973,
      "loss": 0.8135,
      "step": 137
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.000288581929876693,
      "loss": 0.8852,
      "step": 138
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0002879813322582237,
      "loss": 0.8446,
      "step": 139
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.8527,
      "step": 140
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0002867359958109792,
      "loss": 0.85,
      "step": 141
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028609138997723397,
      "loss": 0.871,
      "step": 142
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028543225033735313,
      "loss": 0.8208,
      "step": 143
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002847586472837968,
      "loss": 0.8125,
      "step": 144
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00028407065275363753,
      "loss": 0.8421,
      "step": 145
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002833683402208777,
      "loss": 0.8677,
      "step": 146
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002826517846886033,
      "loss": 0.8242,
      "step": 147
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.8747,
      "step": 148
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002811762522350481,
      "loss": 0.815,
      "step": 149
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.000280417432892455,
      "loss": 0.8432,
      "step": 150
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002796446856908939,
      "loss": 0.8256,
      "step": 151
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002788580931554828,
      "loss": 0.856,
      "step": 152
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002780577392899446,
      "loss": 0.8366,
      "step": 153
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00027724370956763603,
      "loss": 0.8666,
      "step": 154
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002764160909224196,
      "loss": 0.84,
      "step": 155
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.8468,
      "step": 156
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0002747204418453818,
      "loss": 0.8057,
      "step": 157
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00027385259249948333,
      "loss": 0.8228,
      "step": 158
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.000272971516383184,
      "loss": 0.8424,
      "step": 159
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00027207730759052924,
      "loss": 0.8181,
      "step": 160
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002711700616180619,
      "loss": 0.8378,
      "step": 161
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002702498753546232,
      "loss": 0.8903,
      "step": 162
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00026931684707100586,
      "loss": 0.8211,
      "step": 163
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.8213,
      "step": 164
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026741266437304716,
      "loss": 0.811,
      "step": 165
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0002664417133148636,
      "loss": 0.8481,
      "step": 166
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026545832692709964,
      "loss": 0.8715,
      "step": 167
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00026446261022997097,
      "loss": 0.8717,
      "step": 168
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00026345466956050176,
      "loss": 0.8589,
      "step": 169
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002624346125611689,
      "loss": 0.8298,
      "step": 170
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.000261402548168406,
      "loss": 0.859,
      "step": 171
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002603585866009697,
      "loss": 0.8108,
      "step": 172
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002593028393481692,
      "loss": 0.8591,
      "step": 173
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002582354191579593,
      "loss": 0.8521,
      "step": 174
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00025715644002489996,
      "loss": 0.8394,
      "step": 175
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025606601717798207,
      "loss": 0.8457,
      "step": 176
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025496426706832193,
      "loss": 0.8656,
      "step": 177
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002538513073567244,
      "loss": 0.8678,
      "step": 178
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025272725690111806,
      "loss": 0.8367,
      "step": 179
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00025159223574386114,
      "loss": 0.8449,
      "step": 180
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00025044636509892227,
      "loss": 0.8003,
      "step": 181
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00024928976733893494,
      "loss": 0.8312,
      "step": 182
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002481225659821294,
      "loss": 0.8567,
      "step": 183
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024694488567914106,
      "loss": 0.872,
      "step": 184
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002457568521996988,
      "loss": 0.8735,
      "step": 185
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024455859241919326,
      "loss": 0.8571,
      "step": 186
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002433502343051274,
      "loss": 0.8395,
      "step": 187
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024213190690345018,
      "loss": 0.8439,
      "step": 188
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00024090374032477533,
      "loss": 0.8661,
      "step": 189
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0002396658657304861,
      "loss": 0.8679,
      "step": 190
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00023841841531872798,
      "loss": 0.8151,
      "step": 191
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00023716152231029072,
      "loss": 0.8517,
      "step": 192
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.000235895320934381,
      "loss": 0.8591,
      "step": 193
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00023461994641428766,
      "loss": 0.8638,
      "step": 194
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0002333355349529403,
      "loss": 0.8512,
      "step": 195
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00023204222371836405,
      "loss": 0.835,
      "step": 196
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00023074015082903015,
      "loss": 0.8611,
      "step": 197
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002294294553391063,
      "loss": 0.7981,
      "step": 198
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00022811027722360598,
      "loss": 0.84,
      "step": 199
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00022678275736344014,
      "loss": 0.8008,
      "step": 200
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022544703753037178,
      "loss": 0.8333,
      "step": 201
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022410326037187558,
      "loss": 0.8197,
      "step": 202
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022275156939590392,
      "loss": 0.8408,
      "step": 203
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022139210895556104,
      "loss": 0.8608,
      "step": 204
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00022002502423368678,
      "loss": 0.8705,
      "step": 205
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002186504612273522,
      "loss": 0.8388,
      "step": 206
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002172685667322676,
      "loss": 0.8779,
      "step": 207
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00021587948832710554,
      "loss": 0.8314,
      "step": 208
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002144833743577405,
      "loss": 0.8249,
      "step": 209
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002130803739214061,
      "loss": 0.8312,
      "step": 210
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00021167063685077262,
      "loss": 0.8003,
      "step": 211
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002102543136979454,
      "loss": 0.8513,
      "step": 212
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020883155571838692,
      "loss": 0.835,
      "step": 213
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020740251485476345,
      "loss": 0.8981,
      "step": 214
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020596734372071852,
      "loss": 0.8353,
      "step": 215
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020452619558457446,
      "loss": 0.8457,
      "step": 216
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00020307922435296443,
      "loss": 0.8225,
      "step": 217
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002016265845543958,
      "loss": 0.8342,
      "step": 218
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00020016843132274746,
      "loss": 0.807,
      "step": 219
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00019870492038070252,
      "loss": 0.8271,
      "step": 220
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019723620802311774,
      "loss": 0.8731,
      "step": 221
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019576245110033231,
      "loss": 0.8436,
      "step": 222
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019428380700141698,
      "loss": 0.8816,
      "step": 223
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019280043363736579,
      "loss": 0.8281,
      "step": 224
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001913124894242322,
      "loss": 0.8413,
      "step": 225
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018982013326621083,
      "loss": 0.8318,
      "step": 226
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018832352453866777,
      "loss": 0.8394,
      "step": 227
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018682282307111987,
      "loss": 0.819,
      "step": 228
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018531818913016584,
      "loss": 0.8598,
      "step": 229
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018380978340237092,
      "loss": 0.8346,
      "step": 230
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018229776697710617,
      "loss": 0.8523,
      "step": 231
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018078230132934512,
      "loss": 0.8461,
      "step": 232
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017926354830241924,
      "loss": 0.8368,
      "step": 233
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017774167009073377,
      "loss": 0.8336,
      "step": 234
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017621682922244633,
      "loss": 0.8049,
      "step": 235
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017468918854211007,
      "loss": 0.8245,
      "step": 236
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001731589111932823,
      "loss": 0.8474,
      "step": 237
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.000171626160601102,
      "loss": 0.8643,
      "step": 238
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001700911004548369,
      "loss": 0.8448,
      "step": 239
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016855389469040217,
      "loss": 0.8822,
      "step": 240
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016701470747285317,
      "loss": 0.8225,
      "step": 241
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016547370317885354,
      "loss": 0.8269,
      "step": 242
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0001639310463791205,
      "loss": 0.8365,
      "step": 243
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016238690182084986,
      "loss": 0.8265,
      "step": 244
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016084143441012156,
      "loss": 0.8439,
      "step": 245
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001592948091942892,
      "loss": 0.8438,
      "step": 246
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001577471913443532,
      "loss": 0.8595,
      "step": 247
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00015619874613732196,
      "loss": 0.8313,
      "step": 248
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001546496389385611,
      "loss": 0.8258,
      "step": 249
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015310003518413315,
      "loss": 0.7778,
      "step": 250
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015155010036313008,
      "loss": 0.8442,
      "step": 251
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015,
      "loss": 0.8477,
      "step": 252
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014844989963686992,
      "loss": 0.8447,
      "step": 253
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014689996481586688,
      "loss": 0.8424,
      "step": 254
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014535036106143892,
      "loss": 0.8357,
      "step": 255
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.000143801253862678,
      "loss": 0.8467,
      "step": 256
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001422528086556468,
      "loss": 0.8105,
      "step": 257
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001407051908057108,
      "loss": 0.8215,
      "step": 258
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001391585655898784,
      "loss": 0.8452,
      "step": 259
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00013761309817915014,
      "loss": 0.7967,
      "step": 260
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013606895362087949,
      "loss": 0.8172,
      "step": 261
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013452629682114646,
      "loss": 0.8837,
      "step": 262
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013298529252714684,
      "loss": 0.8293,
      "step": 263
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013144610530959784,
      "loss": 0.8493,
      "step": 264
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0001299088995451631,
      "loss": 0.7906,
      "step": 265
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012837383939889798,
      "loss": 0.8201,
      "step": 266
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012684108880671772,
      "loss": 0.7947,
      "step": 267
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012531081145788987,
      "loss": 0.8238,
      "step": 268
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012378317077755362,
      "loss": 0.8417,
      "step": 269
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012225832990926623,
      "loss": 0.8728,
      "step": 270
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012073645169758076,
      "loss": 0.798,
      "step": 271
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00011921769867065485,
      "loss": 0.8053,
      "step": 272
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011770223302289385,
      "loss": 0.7943,
      "step": 273
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001161902165976291,
      "loss": 0.8407,
      "step": 274
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011468181086983412,
      "loss": 0.8326,
      "step": 275
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011317717692888012,
      "loss": 0.8355,
      "step": 276
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0001116764754613322,
      "loss": 0.8015,
      "step": 277
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00011017986673378918,
      "loss": 0.8426,
      "step": 278
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010868751057576782,
      "loss": 0.858,
      "step": 279
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010719956636263423,
      "loss": 0.8268,
      "step": 280
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010571619299858303,
      "loss": 0.8244,
      "step": 281
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010423754889966769,
      "loss": 0.8328,
      "step": 282
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010276379197688222,
      "loss": 0.8201,
      "step": 283
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010129507961929748,
      "loss": 0.804,
      "step": 284
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.983156867725255e-05,
      "loss": 0.8273,
      "step": 285
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.837341544560423e-05,
      "loss": 0.8222,
      "step": 286
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.692077564703555e-05,
      "loss": 0.828,
      "step": 287
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.547380441542549e-05,
      "loss": 0.8507,
      "step": 288
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.403265627928147e-05,
      "loss": 0.8044,
      "step": 289
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.259748514523653e-05,
      "loss": 0.8202,
      "step": 290
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.116844428161309e-05,
      "loss": 0.7773,
      "step": 291
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.97456863020546e-05,
      "loss": 0.8119,
      "step": 292
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.83293631492274e-05,
      "loss": 0.8274,
      "step": 293
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.691962607859386e-05,
      "loss": 0.8167,
      "step": 294
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.55166256422595e-05,
      "loss": 0.8389,
      "step": 295
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.412051167289446e-05,
      "loss": 0.8091,
      "step": 296
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.27314332677324e-05,
      "loss": 0.8418,
      "step": 297
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.134953877264778e-05,
      "loss": 0.8105,
      "step": 298
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.997497576631323e-05,
      "loss": 0.8409,
      "step": 299
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.860789104443896e-05,
      "loss": 0.8429,
      "step": 300
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.724843060409606e-05,
      "loss": 0.8175,
      "step": 301
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.589673962812442e-05,
      "loss": 0.8115,
      "step": 302
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.455296246962823e-05,
      "loss": 0.846,
      "step": 303
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.321724263655988e-05,
      "loss": 0.8175,
      "step": 304
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.188972277639405e-05,
      "loss": 0.8177,
      "step": 305
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.057054466089371e-05,
      "loss": 0.8441,
      "step": 306
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.925984917096985e-05,
      "loss": 0.8272,
      "step": 307
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.795777628163599e-05,
      "loss": 0.8525,
      "step": 308
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.66644650470597e-05,
      "loss": 0.8533,
      "step": 309
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.538005358571234e-05,
      "loss": 0.8436,
      "step": 310
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.410467906561896e-05,
      "loss": 0.832,
      "step": 311
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.283847768970926e-05,
      "loss": 0.7897,
      "step": 312
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.158158468127196e-05,
      "loss": 0.824,
      "step": 313
    },
    {
      "epoch": 0.78,
      "learning_rate": 6.0334134269513865e-05,
      "loss": 0.8435,
      "step": 314
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.9096259675224647e-05,
      "loss": 0.8233,
      "step": 315
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.786809309654982e-05,
      "loss": 0.8333,
      "step": 316
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.664976569487263e-05,
      "loss": 0.8658,
      "step": 317
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.5441407580806745e-05,
      "loss": 0.8229,
      "step": 318
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.4243147800301134e-05,
      "loss": 0.7847,
      "step": 319
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.305511432085884e-05,
      "loss": 0.7924,
      "step": 320
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.187743401787054e-05,
      "loss": 0.8458,
      "step": 321
    },
    {
      "epoch": 0.8,
      "learning_rate": 5.071023266106502e-05,
      "loss": 0.8492,
      "step": 322
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.955363490107777e-05,
      "loss": 0.8584,
      "step": 323
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.840776425613886e-05,
      "loss": 0.8262,
      "step": 324
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.727274309888191e-05,
      "loss": 0.8299,
      "step": 325
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.614869264327553e-05,
      "loss": 0.8267,
      "step": 326
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.503573293167805e-05,
      "loss": 0.8352,
      "step": 327
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.3933982822017876e-05,
      "loss": 0.8181,
      "step": 328
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.284355997510003e-05,
      "loss": 0.8091,
      "step": 329
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.17645808420407e-05,
      "loss": 0.8123,
      "step": 330
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0697160651830814e-05,
      "loss": 0.8075,
      "step": 331
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.964141339903026e-05,
      "loss": 0.8576,
      "step": 332
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.8597451831594014e-05,
      "loss": 0.8134,
      "step": 333
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.756538743883111e-05,
      "loss": 0.8384,
      "step": 334
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.654533043949823e-05,
      "loss": 0.8061,
      "step": 335
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.5537389770029046e-05,
      "loss": 0.8438,
      "step": 336
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.454167307290036e-05,
      "loss": 0.8024,
      "step": 337
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.3558286685136384e-05,
      "loss": 0.8332,
      "step": 338
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.258733562695283e-05,
      "loss": 0.8247,
      "step": 339
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.162892359054098e-05,
      "loss": 0.8482,
      "step": 340
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.0683152928994105e-05,
      "loss": 0.8171,
      "step": 341
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.9750124645376755e-05,
      "loss": 0.8296,
      "step": 342
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.8829938381938117e-05,
      "loss": 0.8403,
      "step": 343
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.792269240947076e-05,
      "loss": 0.8472,
      "step": 344
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.702848361681605e-05,
      "loss": 0.8305,
      "step": 345
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.6147407500516643e-05,
      "loss": 0.8491,
      "step": 346
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.5279558154618197e-05,
      "loss": 0.8299,
      "step": 347
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.4425028260620715e-05,
      "loss": 0.8329,
      "step": 348
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.35839090775804e-05,
      "loss": 0.8234,
      "step": 349
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.2756290432363957e-05,
      "loss": 0.8197,
      "step": 350
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.1942260710055386e-05,
      "loss": 0.8238,
      "step": 351
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.1141906844517203e-05,
      "loss": 0.8051,
      "step": 352
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.0355314309106097e-05,
      "loss": 0.8269,
      "step": 353
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.9582567107544962e-05,
      "loss": 0.8325,
      "step": 354
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.882374776495187e-05,
      "loss": 0.843,
      "step": 355
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.8078937319026654e-05,
      "loss": 0.8352,
      "step": 356
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.734821531139667e-05,
      "loss": 0.8756,
      "step": 357
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.663165977912221e-05,
      "loss": 0.8193,
      "step": 358
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.5929347246362452e-05,
      "loss": 0.8123,
      "step": 359
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.524135271620317e-05,
      "loss": 0.807,
      "step": 360
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.456774966264685e-05,
      "loss": 0.878,
      "step": 361
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.390861002276602e-05,
      "loss": 0.8185,
      "step": 362
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.3264004189020777e-05,
      "loss": 0.7939,
      "step": 363
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2634001001741373e-05,
      "loss": 0.8163,
      "step": 364
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2018667741776266e-05,
      "loss": 0.8429,
      "step": 365
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.1418070123306989e-05,
      "loss": 0.8292,
      "step": 366
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.0832272286830285e-05,
      "loss": 0.8241,
      "step": 367
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.0261336792308167e-05,
      "loss": 0.8438,
      "step": 368
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.705324612486936e-06,
      "loss": 0.8631,
      "step": 369
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.164295126385562e-06,
      "loss": 0.8359,
      "step": 370
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.638306112954452e-06,
      "loss": 0.8304,
      "step": 371
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.127413744904804e-06,
      "loss": 0.7995,
      "step": 372
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.631672582709808e-06,
      "loss": 0.817,
      "step": 373
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.151135568777838e-06,
      "loss": 0.8264,
      "step": 374
    },
    {
      "epoch": 0.93,
      "learning_rate": 6.685854021798509e-06,
      "loss": 0.8166,
      "step": 375
    }
  ],
  "logging_steps": 1,
  "max_steps": 404,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "total_flos": 4.214459938342994e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
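The learning_rate column above follows a linear warmup to a 3e-4 peak over the first 100 steps (3e-6 per step), then a cosine decay toward zero at max_steps = 404. Below is a minimal Python sketch for inspecting this trainer_state.json: it reconstructs that schedule and checks it against the logged values, then plots the loss curve. The warmup length and peak LR here are inferred from the logged values, not read from a training config, and the output filename is illustrative.

import json
import math

import matplotlib.pyplot as plt

# Load the state the HF Trainer wrote at this checkpoint.
with open("checkpoint-375/trainer_state.json") as f:
    state = json.load(f)

steps = [e["step"] for e in state["log_history"]]
losses = [e["loss"] for e in state["log_history"]]
lrs = [e["learning_rate"] for e in state["log_history"]]

# Inferred from the log itself: 100 warmup steps up to a 3e-4 peak,
# then half-cosine decay to zero at max_steps (404).
WARMUP_STEPS = 100
PEAK_LR = 3e-4
max_steps = state["max_steps"]

def scheduled_lr(step: int) -> float:
    """Linear warmup followed by half-cosine decay to zero."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (max_steps - WARMUP_STEPS)
    return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * progress))

# The logged rates should match the reconstruction up to float noise.
worst = max(abs(lr - scheduled_lr(s)) for s, lr in zip(steps, lrs))
print(f"max |logged - reconstructed| learning rate: {worst:.3e}")

# Loss curve for this run (375 of 404 steps, ~0.93 of one epoch).
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("llama2_7b_SGD_Cosine / checkpoint-375")
plt.savefig("loss_curve.png")

As a spot check against the log, step 252 sits at exactly half the peak (0.00015, the cosine midpoint between steps 100 and 404), and step 375 at about 6.69e-06, consistent with the cosine tail.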