llama-2FineTuned/checkpoint-160/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8533333333333334,
"global_step": 160,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1.1764705882352942e-05,
"loss": 1.9122,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2.3529411764705884e-05,
"loss": 2.0707,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 3.529411764705883e-05,
"loss": 1.9703,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 4.705882352941177e-05,
"loss": 2.015,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 5.882352941176471e-05,
"loss": 2.119,
"step": 5
},
{
"epoch": 0.03,
"learning_rate": 7.058823529411765e-05,
"loss": 2.0515,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 8.23529411764706e-05,
"loss": 1.7436,
"step": 7
},
{
"epoch": 0.04,
"learning_rate": 9.411764705882353e-05,
"loss": 1.9326,
"step": 8
},
{
"epoch": 0.05,
"learning_rate": 0.00010588235294117647,
"loss": 2.0819,
"step": 9
},
{
"epoch": 0.05,
"learning_rate": 0.00011764705882352942,
"loss": 1.9965,
"step": 10
},
{
"epoch": 0.06,
"learning_rate": 0.00012941176470588237,
"loss": 2.0543,
"step": 11
},
{
"epoch": 0.06,
"learning_rate": 0.0001411764705882353,
"loss": 2.0461,
"step": 12
},
{
"epoch": 0.07,
"learning_rate": 0.00015294117647058822,
"loss": 2.311,
"step": 13
},
{
"epoch": 0.07,
"learning_rate": 0.0001647058823529412,
"loss": 1.8447,
"step": 14
},
{
"epoch": 0.08,
"learning_rate": 0.00017647058823529413,
"loss": 2.6263,
"step": 15
},
{
"epoch": 0.09,
"learning_rate": 0.00018823529411764707,
"loss": 2.6179,
"step": 16
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 2.535,
"step": 17
},
{
"epoch": 0.1,
"learning_rate": 0.00019999833248118322,
"loss": 2.5924,
"step": 18
},
{
"epoch": 0.1,
"learning_rate": 0.00019999332998034514,
"loss": 2.3562,
"step": 19
},
{
"epoch": 0.11,
"learning_rate": 0.00019998499266432108,
"loss": 2.2352,
"step": 20
},
{
"epoch": 0.11,
"learning_rate": 0.00019997332081116373,
"loss": 2.152,
"step": 21
},
{
"epoch": 0.12,
"learning_rate": 0.00019995831481013374,
"loss": 2.1149,
"step": 22
},
{
"epoch": 0.12,
"learning_rate": 0.00019993997516168689,
"loss": 2.1608,
"step": 23
},
{
"epoch": 0.13,
"learning_rate": 0.0001999183024774573,
"loss": 2.1593,
"step": 24
},
{
"epoch": 0.13,
"learning_rate": 0.00019989329748023725,
"loss": 2.2774,
"step": 25
},
{
"epoch": 0.14,
"learning_rate": 0.00019986496100395275,
"loss": 2.0355,
"step": 26
},
{
"epoch": 0.14,
"learning_rate": 0.00019983329399363598,
"loss": 1.9455,
"step": 27
},
{
"epoch": 0.15,
"learning_rate": 0.0001997982975053936,
"loss": 2.5648,
"step": 28
},
{
"epoch": 0.15,
"learning_rate": 0.0001997599727063717,
"loss": 2.3619,
"step": 29
},
{
"epoch": 0.16,
"learning_rate": 0.00019971832087471676,
"loss": 2.0888,
"step": 30
},
{
"epoch": 0.17,
"learning_rate": 0.000199673343399533,
"loss": 1.9363,
"step": 31
},
{
"epoch": 0.17,
"learning_rate": 0.00019962504178083618,
"loss": 2.0259,
"step": 32
},
{
"epoch": 0.18,
"learning_rate": 0.00019957341762950344,
"loss": 1.8482,
"step": 33
},
{
"epoch": 0.18,
"learning_rate": 0.0001995184726672197,
"loss": 1.8542,
"step": 34
},
{
"epoch": 0.19,
"learning_rate": 0.0001994602087264201,
"loss": 1.9435,
"step": 35
},
{
"epoch": 0.19,
"learning_rate": 0.00019939862775022893,
"loss": 1.6559,
"step": 36
},
{
"epoch": 0.2,
"learning_rate": 0.00019933373179239502,
"loss": 1.9478,
"step": 37
},
{
"epoch": 0.2,
"learning_rate": 0.0001992655230172229,
"loss": 1.9417,
"step": 38
},
{
"epoch": 0.21,
"learning_rate": 0.000199194003699501,
"loss": 1.9067,
"step": 39
},
{
"epoch": 0.21,
"learning_rate": 0.00019911917622442537,
"loss": 1.6106,
"step": 40
},
{
"epoch": 0.22,
"learning_rate": 0.0001990410430875205,
"loss": 1.2742,
"step": 41
},
{
"epoch": 0.22,
"learning_rate": 0.00019895960689455598,
"loss": 1.3048,
"step": 42
},
{
"epoch": 0.23,
"learning_rate": 0.0001988748703614594,
"loss": 1.1847,
"step": 43
},
{
"epoch": 0.23,
"learning_rate": 0.00019878683631422605,
"loss": 1.1683,
"step": 44
},
{
"epoch": 0.24,
"learning_rate": 0.00019869550768882455,
"loss": 1.1531,
"step": 45
},
{
"epoch": 0.25,
"learning_rate": 0.00019860088753109896,
"loss": 1.1401,
"step": 46
},
{
"epoch": 0.25,
"learning_rate": 0.0001985029789966671,
"loss": 1.8959,
"step": 47
},
{
"epoch": 0.26,
"learning_rate": 0.00019840178535081545,
"loss": 1.897,
"step": 48
},
{
"epoch": 0.26,
"learning_rate": 0.0001982973099683902,
"loss": 1.791,
"step": 49
},
{
"epoch": 0.27,
"learning_rate": 0.00019818955633368464,
"loss": 1.7451,
"step": 50
},
{
"epoch": 0.27,
"learning_rate": 0.00019807852804032305,
"loss": 1.8556,
"step": 51
},
{
"epoch": 0.28,
"learning_rate": 0.00019796422879114084,
"loss": 1.7882,
"step": 52
},
{
"epoch": 0.28,
"learning_rate": 0.0001978466623980609,
"loss": 1.8834,
"step": 53
},
{
"epoch": 0.29,
"learning_rate": 0.00019772583278196678,
"loss": 1.7593,
"step": 54
},
{
"epoch": 0.29,
"learning_rate": 0.00019760174397257156,
"loss": 1.8291,
"step": 55
},
{
"epoch": 0.3,
"learning_rate": 0.00019747440010828383,
"loss": 1.808,
"step": 56
},
{
"epoch": 0.3,
"learning_rate": 0.0001973438054360693,
"loss": 1.6324,
"step": 57
},
{
"epoch": 0.31,
"learning_rate": 0.00019720996431130946,
"loss": 1.483,
"step": 58
},
{
"epoch": 0.31,
"learning_rate": 0.00019707288119765623,
"loss": 1.9648,
"step": 59
},
{
"epoch": 0.32,
"learning_rate": 0.000196932560666883,
"loss": 1.5742,
"step": 60
},
{
"epoch": 0.33,
"learning_rate": 0.00019678900739873226,
"loss": 1.7066,
"step": 61
},
{
"epoch": 0.33,
"learning_rate": 0.00019664222618075958,
"loss": 2.0719,
"step": 62
},
{
"epoch": 0.34,
"learning_rate": 0.0001964922219081738,
"loss": 1.9432,
"step": 63
},
{
"epoch": 0.34,
"learning_rate": 0.00019633899958367384,
"loss": 1.7979,
"step": 64
},
{
"epoch": 0.35,
"learning_rate": 0.00019618256431728194,
"loss": 1.6286,
"step": 65
},
{
"epoch": 0.35,
"learning_rate": 0.000196022921326173,
"loss": 1.6031,
"step": 66
},
{
"epoch": 0.36,
"learning_rate": 0.00019586007593450097,
"loss": 1.5169,
"step": 67
},
{
"epoch": 0.36,
"learning_rate": 0.0001956940335732209,
"loss": 1.5923,
"step": 68
},
{
"epoch": 0.37,
"learning_rate": 0.000195524799779908,
"loss": 1.6123,
"step": 69
},
{
"epoch": 0.37,
"learning_rate": 0.000195352380198573,
"loss": 1.7392,
"step": 70
},
{
"epoch": 0.38,
"learning_rate": 0.00019517678057947384,
"loss": 1.6406,
"step": 71
},
{
"epoch": 0.38,
"learning_rate": 0.00019499800677892385,
"loss": 1.5532,
"step": 72
},
{
"epoch": 0.39,
"learning_rate": 0.0001948160647590966,
"loss": 1.816,
"step": 73
},
{
"epoch": 0.39,
"learning_rate": 0.0001946309605878269,
"loss": 1.5759,
"step": 74
},
{
"epoch": 0.4,
"learning_rate": 0.00019444270043840852,
"loss": 1.6726,
"step": 75
},
{
"epoch": 0.41,
"learning_rate": 0.00019425129058938832,
"loss": 1.6134,
"step": 76
},
{
"epoch": 0.41,
"learning_rate": 0.00019405673742435678,
"loss": 1.6285,
"step": 77
},
{
"epoch": 0.42,
"learning_rate": 0.00019385904743173516,
"loss": 1.6023,
"step": 78
},
{
"epoch": 0.42,
"learning_rate": 0.00019365822720455916,
"loss": 1.5177,
"step": 79
},
{
"epoch": 0.43,
"learning_rate": 0.00019345428344025883,
"loss": 1.5413,
"step": 80
},
{
"epoch": 0.43,
"learning_rate": 0.00019324722294043558,
"loss": 1.3404,
"step": 81
},
{
"epoch": 0.44,
"learning_rate": 0.00019303705261063497,
"loss": 1.6409,
"step": 82
},
{
"epoch": 0.44,
"learning_rate": 0.00019282377946011652,
"loss": 1.6361,
"step": 83
},
{
"epoch": 0.45,
"learning_rate": 0.00019260741060162016,
"loss": 1.4902,
"step": 84
},
{
"epoch": 0.45,
"learning_rate": 0.0001923879532511287,
"loss": 1.3232,
"step": 85
},
{
"epoch": 0.46,
"learning_rate": 0.00019216541472762735,
"loss": 1.2438,
"step": 86
},
{
"epoch": 0.46,
"learning_rate": 0.00019193980245285966,
"loss": 1.0828,
"step": 87
},
{
"epoch": 0.47,
"learning_rate": 0.00019171112395107985,
"loss": 1.0696,
"step": 88
},
{
"epoch": 0.47,
"learning_rate": 0.0001914793868488021,
"loss": 1.0179,
"step": 89
},
{
"epoch": 0.48,
"learning_rate": 0.0001912445988745459,
"loss": 1.0012,
"step": 90
},
{
"epoch": 0.49,
"learning_rate": 0.0001910067678585786,
"loss": 0.9423,
"step": 91
},
{
"epoch": 0.49,
"learning_rate": 0.00019076590173265406,
"loss": 0.9173,
"step": 92
},
{
"epoch": 0.5,
"learning_rate": 0.00019052200852974819,
"loss": 1.8901,
"step": 93
},
{
"epoch": 0.5,
"learning_rate": 0.0001902750963837912,
"loss": 1.9665,
"step": 94
},
{
"epoch": 0.51,
"learning_rate": 0.00019002517352939598,
"loss": 1.8323,
"step": 95
},
{
"epoch": 0.51,
"learning_rate": 0.0001897722483015838,
"loss": 1.7828,
"step": 96
},
{
"epoch": 0.52,
"learning_rate": 0.00018951632913550626,
"loss": 1.5083,
"step": 97
},
{
"epoch": 0.52,
"learning_rate": 0.00018925742456616374,
"loss": 1.6924,
"step": 98
},
{
"epoch": 0.53,
"learning_rate": 0.0001889955432281212,
"loss": 1.7446,
"step": 99
},
{
"epoch": 0.53,
"learning_rate": 0.0001887306938552197,
"loss": 1.6143,
"step": 100
},
{
"epoch": 0.54,
"learning_rate": 0.00018846288528028555,
"loss": 1.5013,
"step": 101
},
{
"epoch": 0.54,
"learning_rate": 0.0001881921264348355,
"loss": 1.6335,
"step": 102
},
{
"epoch": 0.55,
"learning_rate": 0.00018791842634877898,
"loss": 1.5852,
"step": 103
},
{
"epoch": 0.55,
"learning_rate": 0.00018764179415011682,
"loss": 1.4358,
"step": 104
},
{
"epoch": 0.56,
"learning_rate": 0.00018736223906463696,
"loss": 1.7103,
"step": 105
},
{
"epoch": 0.57,
"learning_rate": 0.0001870797704156067,
"loss": 1.3361,
"step": 106
},
{
"epoch": 0.57,
"learning_rate": 0.00018679439762346185,
"loss": 1.7021,
"step": 107
},
{
"epoch": 0.58,
"learning_rate": 0.00018650613020549232,
"loss": 2.3076,
"step": 108
},
{
"epoch": 0.58,
"learning_rate": 0.00018621497777552507,
"loss": 1.6109,
"step": 109
},
{
"epoch": 0.59,
"learning_rate": 0.00018592095004360318,
"loss": 1.8032,
"step": 110
},
{
"epoch": 0.59,
"learning_rate": 0.00018562405681566216,
"loss": 1.6432,
"step": 111
},
{
"epoch": 0.6,
"learning_rate": 0.0001853243079932029,
"loss": 1.4159,
"step": 112
},
{
"epoch": 0.6,
"learning_rate": 0.00018502171357296144,
"loss": 1.4869,
"step": 113
},
{
"epoch": 0.61,
"learning_rate": 0.00018471628364657555,
"loss": 1.419,
"step": 114
},
{
"epoch": 0.61,
"learning_rate": 0.00018440802840024822,
"loss": 1.3616,
"step": 115
},
{
"epoch": 0.62,
"learning_rate": 0.00018409695811440796,
"loss": 1.4805,
"step": 116
},
{
"epoch": 0.62,
"learning_rate": 0.00018378308316336584,
"loss": 1.5585,
"step": 117
},
{
"epoch": 0.63,
"learning_rate": 0.0001834664140149696,
"loss": 1.4302,
"step": 118
},
{
"epoch": 0.63,
"learning_rate": 0.00018314696123025454,
"loss": 1.3596,
"step": 119
},
{
"epoch": 0.64,
"learning_rate": 0.0001828247354630912,
"loss": 1.5605,
"step": 120
},
{
"epoch": 0.65,
"learning_rate": 0.00018249974745983023,
"loss": 1.5355,
"step": 121
},
{
"epoch": 0.65,
"learning_rate": 0.00018217200805894384,
"loss": 1.5559,
"step": 122
},
{
"epoch": 0.66,
"learning_rate": 0.00018184152819066435,
"loss": 1.3455,
"step": 123
},
{
"epoch": 0.66,
"learning_rate": 0.00018150831887661978,
"loss": 1.4451,
"step": 124
},
{
"epoch": 0.67,
"learning_rate": 0.00018117239122946615,
"loss": 1.302,
"step": 125
},
{
"epoch": 0.67,
"learning_rate": 0.00018083375645251684,
"loss": 1.2725,
"step": 126
},
{
"epoch": 0.68,
"learning_rate": 0.0001804924258393692,
"loss": 1.2186,
"step": 127
},
{
"epoch": 0.68,
"learning_rate": 0.00018014841077352762,
"loss": 1.2388,
"step": 128
},
{
"epoch": 0.69,
"learning_rate": 0.000179801722728024,
"loss": 1.2655,
"step": 129
},
{
"epoch": 0.69,
"learning_rate": 0.00017945237326503507,
"loss": 1.1938,
"step": 130
},
{
"epoch": 0.7,
"learning_rate": 0.00017910037403549693,
"loss": 1.1203,
"step": 131
},
{
"epoch": 0.7,
"learning_rate": 0.0001787457367787164,
"loss": 1.1576,
"step": 132
},
{
"epoch": 0.71,
"learning_rate": 0.00017838847332197938,
"loss": 1.0116,
"step": 133
},
{
"epoch": 0.71,
"learning_rate": 0.00017802859558015664,
"loss": 0.8979,
"step": 134
},
{
"epoch": 0.72,
"learning_rate": 0.00017766611555530636,
"loss": 0.8594,
"step": 135
},
{
"epoch": 0.73,
"learning_rate": 0.0001773010453362737,
"loss": 0.819,
"step": 136
},
{
"epoch": 0.73,
"learning_rate": 0.00017693339709828792,
"loss": 0.8788,
"step": 137
},
{
"epoch": 0.74,
"learning_rate": 0.00017656318310255604,
"loss": 0.8013,
"step": 138
},
{
"epoch": 0.74,
"learning_rate": 0.00017619041569585418,
"loss": 1.6861,
"step": 139
},
{
"epoch": 0.75,
"learning_rate": 0.0001758151073101157,
"loss": 1.6799,
"step": 140
},
{
"epoch": 0.75,
"learning_rate": 0.0001754372704620164,
"loss": 1.7259,
"step": 141
},
{
"epoch": 0.76,
"learning_rate": 0.00017505691775255745,
"loss": 1.6804,
"step": 142
},
{
"epoch": 0.76,
"learning_rate": 0.00017467406186664474,
"loss": 1.6242,
"step": 143
},
{
"epoch": 0.77,
"learning_rate": 0.0001742887155726663,
"loss": 1.7912,
"step": 144
},
{
"epoch": 0.77,
"learning_rate": 0.00017390089172206592,
"loss": 1.5403,
"step": 145
},
{
"epoch": 0.78,
"learning_rate": 0.00017351060324891502,
"loss": 1.5285,
"step": 146
},
{
"epoch": 0.78,
"learning_rate": 0.0001731178631694811,
"loss": 1.5643,
"step": 147
},
{
"epoch": 0.79,
"learning_rate": 0.00017272268458179353,
"loss": 1.425,
"step": 148
},
{
"epoch": 0.79,
"learning_rate": 0.00017232508066520702,
"loss": 1.4601,
"step": 149
},
{
"epoch": 0.8,
"learning_rate": 0.00017192506467996174,
"loss": 1.3357,
"step": 150
},
{
"epoch": 0.81,
"learning_rate": 0.00017152264996674136,
"loss": 1.6524,
"step": 151
},
{
"epoch": 0.81,
"learning_rate": 0.00017111784994622804,
"loss": 1.181,
"step": 152
},
{
"epoch": 0.82,
"learning_rate": 0.00017071067811865476,
"loss": 1.7122,
"step": 153
},
{
"epoch": 0.82,
"learning_rate": 0.00017030114806335526,
"loss": 2.103,
"step": 154
},
{
"epoch": 0.83,
"learning_rate": 0.00016988927343831095,
"loss": 1.5937,
"step": 155
},
{
"epoch": 0.83,
"learning_rate": 0.00016947506797969562,
"loss": 1.5736,
"step": 156
},
{
"epoch": 0.84,
"learning_rate": 0.00016905854550141716,
"loss": 1.2724,
"step": 157
},
{
"epoch": 0.84,
"learning_rate": 0.00016863971989465698,
"loss": 1.224,
"step": 158
},
{
"epoch": 0.85,
"learning_rate": 0.00016821860512740671,
"loss": 1.1956,
"step": 159
},
{
"epoch": 0.85,
"learning_rate": 0.00016779521524400232,
"loss": 1.2721,
"step": 160
}
],
"max_steps": 561,
"num_train_epochs": 3,
"total_flos": 3.107671047831552e+16,
"trial_name": null,
"trial_params": null
}
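
The state above is the standard trainer_state.json that the Hugging Face Transformers Trainer writes alongside each checkpoint; log_history records the loss and learning rate at every logging step. A minimal sketch of how one might inspect it offline, assuming the checkpoint path shown in the header and treating the matplotlib plot as optional:

import json

# Path assumed from the header above; adjust to wherever the checkpoint lives.
STATE_PATH = "llama-2FineTuned/checkpoint-160/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history holds one dict per logging step; keep only entries that report a loss.
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

print(f"epoch {state['epoch']:.2f}, step {state['global_step']} of {state['max_steps']}")
print(f"latest training loss: {losses[-1]:.4f}")

try:
    import matplotlib.pyplot as plt  # optional dependency, used only for the plot
    plt.plot(steps, losses)
    plt.xlabel("step")
    plt.ylabel("training loss")
    plt.title("checkpoint-160 training loss")
    plt.show()
except ImportError:
    pass  # plotting is optional; the printed summary above is enough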