Llama-3-LongVILA-8B-128Frames / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 7.142857142857143e-06,
"loss": 2.0436,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 1.4285714285714285e-05,
"loss": 2.0747,
"step": 2
},
{
"epoch": 0.09,
"learning_rate": 2.1428571428571428e-05,
"loss": 1.9406,
"step": 3
},
{
"epoch": 0.11,
"learning_rate": 2.857142857142857e-05,
"loss": 2.2945,
"step": 4
},
{
"epoch": 0.14,
"learning_rate": 3.571428571428572e-05,
"loss": 2.0633,
"step": 5
},
{
"epoch": 0.17,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.9623,
"step": 6
},
{
"epoch": 0.2,
"learning_rate": 5e-05,
"loss": 2.0427,
"step": 7
},
{
"epoch": 0.23,
"learning_rate": 4.9997006294917125e-05,
"loss": 1.687,
"step": 8
},
{
"epoch": 0.26,
"learning_rate": 4.9988025896650096e-05,
"loss": 2.1023,
"step": 9
},
{
"epoch": 0.29,
"learning_rate": 4.997306095597203e-05,
"loss": 2.0172,
"step": 10
},
{
"epoch": 0.31,
"learning_rate": 4.9952115056932445e-05,
"loss": 1.9736,
"step": 11
},
{
"epoch": 0.34,
"learning_rate": 4.99251932159989e-05,
"loss": 2.0402,
"step": 12
},
{
"epoch": 0.37,
"learning_rate": 4.989230188085556e-05,
"loss": 1.9319,
"step": 13
},
{
"epoch": 0.4,
"learning_rate": 4.985344892885899e-05,
"loss": 1.9959,
"step": 14
},
{
"epoch": 0.43,
"learning_rate": 4.980864366515159e-05,
"loss": 2.0264,
"step": 15
},
{
"epoch": 0.46,
"learning_rate": 4.975789682043301e-05,
"loss": 1.9565,
"step": 16
},
{
"epoch": 0.49,
"learning_rate": 4.970122054839022e-05,
"loss": 1.7632,
"step": 17
},
{
"epoch": 0.51,
"learning_rate": 4.963862842278669e-05,
"loss": 1.9154,
"step": 18
},
{
"epoch": 0.54,
"learning_rate": 4.957013543421161e-05,
"loss": 1.9812,
"step": 19
},
{
"epoch": 0.57,
"learning_rate": 4.9495757986489614e-05,
"loss": 1.8412,
"step": 20
},
{
"epoch": 0.6,
"learning_rate": 4.941551389275217e-05,
"loss": 1.7311,
"step": 21
},
{
"epoch": 0.63,
"learning_rate": 4.932942237117137e-05,
"loss": 1.8453,
"step": 22
},
{
"epoch": 0.66,
"learning_rate": 4.9237504040357286e-05,
"loss": 2.0415,
"step": 23
},
{
"epoch": 0.69,
"learning_rate": 4.913978091441985e-05,
"loss": 1.9249,
"step": 24
},
{
"epoch": 0.71,
"learning_rate": 4.903627639769656e-05,
"loss": 1.8312,
"step": 25
},
{
"epoch": 0.74,
"learning_rate": 4.8927015279147245e-05,
"loss": 1.9261,
"step": 26
},
{
"epoch": 0.77,
"learning_rate": 4.881202372641719e-05,
"loss": 1.9458,
"step": 27
},
{
"epoch": 0.8,
"learning_rate": 4.869132927957007e-05,
"loss": 1.8691,
"step": 28
},
{
"epoch": 0.83,
"learning_rate": 4.856496084449218e-05,
"loss": 1.7991,
"step": 29
},
{
"epoch": 0.86,
"learning_rate": 4.8432948685969646e-05,
"loss": 1.8221,
"step": 30
},
{
"epoch": 0.89,
"learning_rate": 4.829532442044008e-05,
"loss": 1.9247,
"step": 31
},
{
"epoch": 0.91,
"learning_rate": 4.8152121008420524e-05,
"loss": 1.8928,
"step": 32
},
{
"epoch": 0.94,
"learning_rate": 4.8003372746613585e-05,
"loss": 1.9307,
"step": 33
},
{
"epoch": 0.97,
"learning_rate": 4.784911525969345e-05,
"loss": 1.8312,
"step": 34
},
{
"epoch": 1.0,
"learning_rate": 4.768938549177393e-05,
"loss": 1.975,
"step": 35
},
{
"epoch": 1.03,
"learning_rate": 4.752422169756048e-05,
"loss": 1.9311,
"step": 36
},
{
"epoch": 1.06,
"learning_rate": 4.735366343318832e-05,
"loss": 1.7779,
"step": 37
},
{
"epoch": 1.09,
"learning_rate": 4.7177751546748885e-05,
"loss": 1.8819,
"step": 38
},
{
"epoch": 1.11,
"learning_rate": 4.6996528168506864e-05,
"loss": 1.9532,
"step": 39
},
{
"epoch": 1.14,
"learning_rate": 4.6810036700810143e-05,
"loss": 1.8492,
"step": 40
},
{
"epoch": 1.17,
"learning_rate": 4.6618321807695095e-05,
"loss": 2.0124,
"step": 41
},
{
"epoch": 1.2,
"learning_rate": 4.642142940418973e-05,
"loss": 1.7269,
"step": 42
},
{
"epoch": 1.23,
"learning_rate": 4.621940664531718e-05,
"loss": 1.9515,
"step": 43
},
{
"epoch": 1.26,
"learning_rate": 4.601230191480224e-05,
"loss": 1.8711,
"step": 44
},
{
"epoch": 1.29,
"learning_rate": 4.580016481348367e-05,
"loss": 1.94,
"step": 45
},
{
"epoch": 1.31,
"learning_rate": 4.558304614743496e-05,
"loss": 1.8501,
"step": 46
},
{
"epoch": 1.34,
"learning_rate": 4.536099791579643e-05,
"loss": 1.7961,
"step": 47
},
{
"epoch": 1.37,
"learning_rate": 4.5134073298321655e-05,
"loss": 1.8661,
"step": 48
},
{
"epoch": 1.4,
"learning_rate": 4.4902326642641095e-05,
"loss": 1.7798,
"step": 49
},
{
"epoch": 1.43,
"learning_rate": 4.466581345124604e-05,
"loss": 1.851,
"step": 50
},
{
"epoch": 1.46,
"learning_rate": 4.442459036819595e-05,
"loss": 1.8324,
"step": 51
},
{
"epoch": 1.49,
"learning_rate": 4.417871516555241e-05,
"loss": 1.7448,
"step": 52
},
{
"epoch": 1.51,
"learning_rate": 4.392824672954294e-05,
"loss": 1.7811,
"step": 53
},
{
"epoch": 1.54,
"learning_rate": 4.3673245046457924e-05,
"loss": 1.8318,
"step": 54
},
{
"epoch": 1.57,
"learning_rate": 4.341377118828415e-05,
"loss": 1.8495,
"step": 55
},
{
"epoch": 1.6,
"learning_rate": 4.3149887298078276e-05,
"loss": 1.8712,
"step": 56
},
{
"epoch": 1.63,
"learning_rate": 4.288165657508376e-05,
"loss": 1.9371,
"step": 57
},
{
"epoch": 1.66,
"learning_rate": 4.2609143259594906e-05,
"loss": 1.9462,
"step": 58
},
{
"epoch": 1.69,
"learning_rate": 4.233241261757155e-05,
"loss": 2.0101,
"step": 59
},
{
"epoch": 1.71,
"learning_rate": 4.2051530925008053e-05,
"loss": 1.8443,
"step": 60
},
{
"epoch": 1.74,
"learning_rate": 4.1766565452060466e-05,
"loss": 1.9715,
"step": 61
},
{
"epoch": 1.77,
"learning_rate": 4.147758444693557e-05,
"loss": 1.8938,
"step": 62
},
{
"epoch": 1.8,
"learning_rate": 4.118465711954569e-05,
"loss": 1.9364,
"step": 63
},
{
"epoch": 1.83,
"learning_rate": 4.088785362493314e-05,
"loss": 1.7777,
"step": 64
},
{
"epoch": 1.86,
"learning_rate": 4.058724504646834e-05,
"loss": 1.6773,
"step": 65
},
{
"epoch": 1.89,
"learning_rate": 4.028290337882565e-05,
"loss": 1.7501,
"step": 66
},
{
"epoch": 1.91,
"learning_rate": 3.997490151074085e-05,
"loss": 1.8379,
"step": 67
},
{
"epoch": 1.94,
"learning_rate": 3.9663313207554574e-05,
"loss": 1.8593,
"step": 68
},
{
"epoch": 1.97,
"learning_rate": 3.93482130935458e-05,
"loss": 1.8162,
"step": 69
},
{
"epoch": 2.0,
"learning_rate": 3.902967663405956e-05,
"loss": 1.8169,
"step": 70
},
{
"epoch": 2.03,
"learning_rate": 3.870778011743327e-05,
"loss": 1.8185,
"step": 71
},
{
"epoch": 2.06,
"learning_rate": 3.8382600636725984e-05,
"loss": 1.8404,
"step": 72
},
{
"epoch": 2.09,
"learning_rate": 3.805421607125482e-05,
"loss": 1.6879,
"step": 73
},
{
"epoch": 2.11,
"learning_rate": 3.772270506794323e-05,
"loss": 1.765,
"step": 74
},
{
"epoch": 2.14,
"learning_rate": 3.738814702248524e-05,
"loss": 1.81,
"step": 75
},
{
"epoch": 2.17,
"learning_rate": 3.705062206033055e-05,
"loss": 1.8509,
"step": 76
},
{
"epoch": 2.2,
"learning_rate": 3.671021101749476e-05,
"loss": 1.7403,
"step": 77
},
{
"epoch": 2.23,
"learning_rate": 3.636699542119939e-05,
"loss": 1.9653,
"step": 78
},
{
"epoch": 2.26,
"learning_rate": 3.602105747034646e-05,
"loss": 1.7267,
"step": 79
},
{
"epoch": 2.29,
"learning_rate": 3.5672480015832116e-05,
"loss": 1.8532,
"step": 80
},
{
"epoch": 2.31,
"learning_rate": 3.532134654070415e-05,
"loss": 1.7135,
"step": 81
},
{
"epoch": 2.34,
"learning_rate": 3.496774114016809e-05,
"loss": 1.862,
"step": 82
},
{
"epoch": 2.37,
"learning_rate": 3.461174850144674e-05,
"loss": 1.8093,
"step": 83
},
{
"epoch": 2.4,
"learning_rate": 3.425345388349786e-05,
"loss": 1.7221,
"step": 84
},
{
"epoch": 2.43,
"learning_rate": 3.3892943096594966e-05,
"loss": 1.7018,
"step": 85
},
{
"epoch": 2.46,
"learning_rate": 3.353030248177606e-05,
"loss": 1.7583,
"step": 86
},
{
"epoch": 2.49,
"learning_rate": 3.3165618890165304e-05,
"loss": 1.8001,
"step": 87
},
{
"epoch": 2.51,
"learning_rate": 3.279897966217245e-05,
"loss": 1.908,
"step": 88
},
{
"epoch": 2.54,
"learning_rate": 3.243047260657511e-05,
"loss": 1.6914,
"step": 89
},
{
"epoch": 2.57,
"learning_rate": 3.2060185979488925e-05,
"loss": 1.5934,
"step": 90
},
{
"epoch": 2.6,
"learning_rate": 3.168820846323053e-05,
"loss": 1.7841,
"step": 91
},
{
"epoch": 2.63,
"learning_rate": 3.131462914507838e-05,
"loss": 1.8182,
"step": 92
},
{
"epoch": 2.66,
"learning_rate": 3.093953749593678e-05,
"loss": 1.8664,
"step": 93
},
{
"epoch": 2.69,
"learning_rate": 3.056302334890786e-05,
"loss": 1.8402,
"step": 94
},
{
"epoch": 2.71,
"learning_rate": 3.0185176877776877e-05,
"loss": 1.807,
"step": 95
},
{
"epoch": 2.74,
"learning_rate": 2.9806088575415925e-05,
"loss": 1.7117,
"step": 96
},
{
"epoch": 2.77,
"learning_rate": 2.9425849232111208e-05,
"loss": 1.7541,
"step": 97
},
{
"epoch": 2.8,
"learning_rate": 2.9044549913819124e-05,
"loss": 1.7826,
"step": 98
},
{
"epoch": 2.83,
"learning_rate": 2.8662281940356235e-05,
"loss": 1.8804,
"step": 99
},
{
"epoch": 2.86,
"learning_rate": 2.827913686352856e-05,
"loss": 1.7024,
"step": 100
},
{
"epoch": 2.89,
"learning_rate": 2.7895206445205223e-05,
"loss": 1.8257,
"step": 101
},
{
"epoch": 2.91,
"learning_rate": 2.7510582635341814e-05,
"loss": 1.8938,
"step": 102
},
{
"epoch": 2.94,
"learning_rate": 2.7125357549958684e-05,
"loss": 1.795,
"step": 103
},
{
"epoch": 2.97,
"learning_rate": 2.6739623449079532e-05,
"loss": 1.9122,
"step": 104
},
{
"epoch": 3.0,
"learning_rate": 2.635347271463544e-05,
"loss": 1.8717,
"step": 105
},
{
"epoch": 3.03,
"learning_rate": 2.5966997828339722e-05,
"loss": 1.8828,
"step": 106
},
{
"epoch": 3.06,
"learning_rate": 2.5580291349538895e-05,
"loss": 1.6019,
"step": 107
},
{
"epoch": 3.09,
"learning_rate": 2.5193445893045052e-05,
"loss": 1.8392,
"step": 108
},
{
"epoch": 3.11,
"learning_rate": 2.480655410695495e-05,
"loss": 1.805,
"step": 109
},
{
"epoch": 3.14,
"learning_rate": 2.4419708650461108e-05,
"loss": 1.8106,
"step": 110
},
{
"epoch": 3.17,
"learning_rate": 2.403300217166028e-05,
"loss": 1.7194,
"step": 111
},
{
"epoch": 3.2,
"learning_rate": 2.3646527285364565e-05,
"loss": 1.7633,
"step": 112
},
{
"epoch": 3.23,
"learning_rate": 2.326037655092047e-05,
"loss": 1.7865,
"step": 113
},
{
"epoch": 3.26,
"learning_rate": 2.287464245004132e-05,
"loss": 1.7708,
"step": 114
},
{
"epoch": 3.29,
"learning_rate": 2.2489417364658192e-05,
"loss": 1.7652,
"step": 115
},
{
"epoch": 3.31,
"learning_rate": 2.210479355479478e-05,
"loss": 1.9105,
"step": 116
},
{
"epoch": 3.34,
"learning_rate": 2.1720863136471444e-05,
"loss": 1.8936,
"step": 117
},
{
"epoch": 3.37,
"learning_rate": 2.133771805964377e-05,
"loss": 1.7525,
"step": 118
},
{
"epoch": 3.4,
"learning_rate": 2.0955450086180882e-05,
"loss": 1.8283,
"step": 119
},
{
"epoch": 3.43,
"learning_rate": 2.0574150767888794e-05,
"loss": 1.663,
"step": 120
},
{
"epoch": 3.46,
"learning_rate": 2.019391142458408e-05,
"loss": 1.705,
"step": 121
},
{
"epoch": 3.49,
"learning_rate": 1.9814823122223126e-05,
"loss": 1.7988,
"step": 122
},
{
"epoch": 3.51,
"learning_rate": 1.9436976651092144e-05,
"loss": 1.7041,
"step": 123
},
{
"epoch": 3.54,
"learning_rate": 1.906046250406323e-05,
"loss": 1.7457,
"step": 124
},
{
"epoch": 3.57,
"learning_rate": 1.868537085492163e-05,
"loss": 1.648,
"step": 125
},
{
"epoch": 3.6,
"learning_rate": 1.8311791536769483e-05,
"loss": 1.7847,
"step": 126
},
{
"epoch": 3.63,
"learning_rate": 1.793981402051107e-05,
"loss": 1.6724,
"step": 127
},
{
"epoch": 3.66,
"learning_rate": 1.7569527393424892e-05,
"loss": 1.8516,
"step": 128
},
{
"epoch": 3.69,
"learning_rate": 1.7201020337827556e-05,
"loss": 1.7543,
"step": 129
},
{
"epoch": 3.71,
"learning_rate": 1.6834381109834695e-05,
"loss": 1.6904,
"step": 130
},
{
"epoch": 3.74,
"learning_rate": 1.646969751822394e-05,
"loss": 1.701,
"step": 131
},
{
"epoch": 3.77,
"learning_rate": 1.6107056903405036e-05,
"loss": 1.6526,
"step": 132
},
{
"epoch": 3.8,
"learning_rate": 1.574654611650214e-05,
"loss": 1.8144,
"step": 133
},
{
"epoch": 3.83,
"learning_rate": 1.5388251498553265e-05,
"loss": 1.6833,
"step": 134
},
{
"epoch": 3.86,
"learning_rate": 1.5032258859831915e-05,
"loss": 1.7892,
"step": 135
},
{
"epoch": 3.89,
"learning_rate": 1.467865345929586e-05,
"loss": 1.6905,
"step": 136
},
{
"epoch": 3.91,
"learning_rate": 1.4327519984167886e-05,
"loss": 1.7325,
"step": 137
},
{
"epoch": 3.94,
"learning_rate": 1.397894252965355e-05,
"loss": 1.7709,
"step": 138
},
{
"epoch": 3.97,
"learning_rate": 1.3633004578800612e-05,
"loss": 1.6669,
"step": 139
},
{
"epoch": 4.0,
"learning_rate": 1.328978898250525e-05,
"loss": 1.8431,
"step": 140
},
{
"epoch": 4.03,
"learning_rate": 1.294937793966946e-05,
"loss": 1.7394,
"step": 141
},
{
"epoch": 4.06,
"learning_rate": 1.261185297751477e-05,
"loss": 1.7378,
"step": 142
},
{
"epoch": 4.09,
"learning_rate": 1.2277294932056782e-05,
"loss": 1.7358,
"step": 143
},
{
"epoch": 4.11,
"learning_rate": 1.1945783928745186e-05,
"loss": 1.8278,
"step": 144
},
{
"epoch": 4.14,
"learning_rate": 1.1617399363274022e-05,
"loss": 1.7929,
"step": 145
},
{
"epoch": 4.17,
"learning_rate": 1.1292219882566727e-05,
"loss": 1.6347,
"step": 146
},
{
"epoch": 4.2,
"learning_rate": 1.0970323365940444e-05,
"loss": 1.7681,
"step": 147
},
{
"epoch": 4.23,
"learning_rate": 1.0651786906454193e-05,
"loss": 1.7229,
"step": 148
},
{
"epoch": 4.26,
"learning_rate": 1.0336686792445425e-05,
"loss": 1.5717,
"step": 149
},
{
"epoch": 4.29,
"learning_rate": 1.002509848925916e-05,
"loss": 1.6373,
"step": 150
},
{
"epoch": 4.31,
"learning_rate": 9.717096621174354e-06,
"loss": 1.8171,
"step": 151
},
{
"epoch": 4.34,
"learning_rate": 9.412754953531663e-06,
"loss": 1.716,
"step": 152
},
{
"epoch": 4.37,
"learning_rate": 9.112146375066871e-06,
"loss": 1.7158,
"step": 153
},
{
"epoch": 4.4,
"learning_rate": 8.815342880454311e-06,
"loss": 1.7698,
"step": 154
},
{
"epoch": 4.43,
"learning_rate": 8.522415553064433e-06,
"loss": 1.694,
"step": 155
},
{
"epoch": 4.46,
"learning_rate": 8.23343454793954e-06,
"loss": 1.6029,
"step": 156
},
{
"epoch": 4.49,
"learning_rate": 7.948469074991954e-06,
"loss": 1.7488,
"step": 157
},
{
"epoch": 4.51,
"learning_rate": 7.667587382428454e-06,
"loss": 1.7441,
"step": 158
},
{
"epoch": 4.54,
"learning_rate": 7.390856740405092e-06,
"loss": 1.6584,
"step": 159
},
{
"epoch": 4.57,
"learning_rate": 7.118343424916249e-06,
"loss": 1.7259,
"step": 160
},
{
"epoch": 4.6,
"learning_rate": 6.8501127019217346e-06,
"loss": 1.7216,
"step": 161
},
{
"epoch": 4.63,
"learning_rate": 6.586228811715853e-06,
"loss": 1.6678,
"step": 162
},
{
"epoch": 4.66,
"learning_rate": 6.3267549535420855e-06,
"loss": 1.7258,
"step": 163
},
{
"epoch": 4.69,
"learning_rate": 6.071753270457064e-06,
"loss": 1.6373,
"step": 164
},
{
"epoch": 4.71,
"learning_rate": 5.821284834447585e-06,
"loss": 1.6852,
"step": 165
},
{
"epoch": 4.74,
"learning_rate": 5.5754096318040485e-06,
"loss": 1.8066,
"step": 166
},
{
"epoch": 4.77,
"learning_rate": 5.334186548753961e-06,
"loss": 1.7095,
"step": 167
},
{
"epoch": 4.8,
"learning_rate": 5.097673357358907e-06,
"loss": 1.7511,
"step": 168
},
{
"epoch": 4.83,
"learning_rate": 4.865926701678353e-06,
"loss": 1.7555,
"step": 169
},
{
"epoch": 4.86,
"learning_rate": 4.639002084203575e-06,
"loss": 1.5744,
"step": 170
},
{
"epoch": 4.89,
"learning_rate": 4.416953852565045e-06,
"loss": 1.6493,
"step": 171
},
{
"epoch": 4.91,
"learning_rate": 4.199835186516332e-06,
"loss": 1.7753,
"step": 172
},
{
"epoch": 4.94,
"learning_rate": 3.987698085197761e-06,
"loss": 1.7544,
"step": 173
},
{
"epoch": 4.97,
"learning_rate": 3.7805933546828264e-06,
"loss": 1.7049,
"step": 174
},
{
"epoch": 5.0,
"learning_rate": 3.578570595810274e-06,
"loss": 1.6084,
"step": 175
},
{
"epoch": 5.03,
"learning_rate": 3.3816781923049046e-06,
"loss": 1.8014,
"step": 176
},
{
"epoch": 5.06,
"learning_rate": 3.189963299189863e-06,
"loss": 1.5702,
"step": 177
},
{
"epoch": 5.09,
"learning_rate": 3.0034718314931376e-06,
"loss": 1.7683,
"step": 178
},
{
"epoch": 5.11,
"learning_rate": 2.8222484532511166e-06,
"loss": 1.7868,
"step": 179
},
{
"epoch": 5.14,
"learning_rate": 2.6463365668116857e-06,
"loss": 1.6444,
"step": 180
},
{
"epoch": 5.17,
"learning_rate": 2.475778302439524e-06,
"loss": 1.5679,
"step": 181
},
{
"epoch": 5.2,
"learning_rate": 2.310614508226078e-06,
"loss": 1.7885,
"step": 182
},
{
"epoch": 5.23,
"learning_rate": 2.150884740306558e-06,
"loss": 1.3817,
"step": 183
},
{
"epoch": 5.26,
"learning_rate": 1.9966272533864183e-06,
"loss": 1.6504,
"step": 184
},
{
"epoch": 5.29,
"learning_rate": 1.8478789915794769e-06,
"loss": 1.7736,
"step": 185
},
{
"epoch": 5.31,
"learning_rate": 1.7046755795599222e-06,
"loss": 1.5773,
"step": 186
},
{
"epoch": 5.34,
"learning_rate": 1.567051314030349e-06,
"loss": 1.6765,
"step": 187
},
{
"epoch": 5.37,
"learning_rate": 1.4350391555078251e-06,
"loss": 1.7248,
"step": 188
},
{
"epoch": 5.4,
"learning_rate": 1.3086707204299414e-06,
"loss": 1.7631,
"step": 189
},
{
"epoch": 5.43,
"learning_rate": 1.1879762735828081e-06,
"loss": 1.7554,
"step": 190
},
{
"epoch": 5.46,
"learning_rate": 1.0729847208527517e-06,
"loss": 1.7072,
"step": 191
},
{
"epoch": 5.49,
"learning_rate": 9.6372360230344e-07,
"loss": 1.6505,
"step": 192
},
{
"epoch": 5.51,
"learning_rate": 8.602190855801523e-07,
"loss": 1.7094,
"step": 193
},
{
"epoch": 5.54,
"learning_rate": 7.624959596427145e-07,
"loss": 1.8452,
"step": 194
},
{
"epoch": 5.57,
"learning_rate": 6.70577628828628e-07,
"loss": 1.7456,
"step": 195
},
{
"epoch": 5.6,
"learning_rate": 5.844861072478336e-07,
"loss": 1.7514,
"step": 196
},
{
"epoch": 5.63,
"learning_rate": 5.042420135103865e-07,
"loss": 1.7269,
"step": 197
},
{
"epoch": 5.66,
"learning_rate": 4.298645657883904e-07,
"loss": 1.7689,
"step": 198
},
{
"epoch": 5.69,
"learning_rate": 3.613715772133097e-07,
"loss": 1.7866,
"step": 199
},
{
"epoch": 5.71,
"learning_rate": 2.987794516097875e-07,
"loss": 1.8182,
"step": 200
},
{
"epoch": 5.74,
"learning_rate": 2.4210317956698815e-07,
"loss": 1.2477,
"step": 201
},
{
"epoch": 5.77,
"learning_rate": 1.9135633484841097e-07,
"loss": 1.2515,
"step": 202
},
{
"epoch": 5.8,
"learning_rate": 1.4655107114101007e-07,
"loss": 1.1149,
"step": 203
},
{
"epoch": 5.83,
"learning_rate": 1.0769811914444205e-07,
"loss": 1.1842,
"step": 204
},
{
"epoch": 5.86,
"learning_rate": 7.480678400109963e-08,
"loss": 1.1749,
"step": 205
},
{
"epoch": 5.89,
"learning_rate": 4.7884943067555424e-08,
"loss": 1.1625,
"step": 206
},
{
"epoch": 5.91,
"learning_rate": 2.6939044027973757e-08,
"loss": 1.1117,
"step": 207
},
{
"epoch": 5.94,
"learning_rate": 1.1974103349909893e-08,
"loss": 1.1215,
"step": 208
},
{
"epoch": 5.97,
"learning_rate": 2.993705082879328e-09,
"loss": 1.1948,
"step": 209
},
{
"epoch": 6.0,
"learning_rate": 0.0,
"loss": 1.2632,
"step": 210
},
{
"epoch": 6.0,
"step": 210,
"total_flos": 0.0,
"train_loss": 0.056318444297427224,
"train_runtime": 1472.1497,
"train_samples_per_second": 289.312,
"train_steps_per_second": 0.143
}
],
"logging_steps": 1.0,
"max_steps": 210,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 50,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
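
Below is a minimal sketch, not part of the checkpoint itself, showing how this `trainer_state.json` could be inspected with the Python standard library. The local file name `trainer_state.json` is an assumption about where the file is saved; the snippet only relies on the fields visible above (`log_history`, `epoch`, `global_step`, and the final summary record).

```python
# Sketch only: summarise the per-step loss / learning-rate entries in
# "log_history" from a Hugging Face Trainer state file.
# The path "trainer_state.json" is an assumed local copy of the file above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records carry "loss" and "learning_rate"; the last record is the
# aggregate summary written at the end of training (train_runtime, etc.).
steps = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]
summary = state["log_history"][-1]

print(f"epochs: {state['epoch']}, steps: {state['global_step']}")
print(f"first loss: {steps[0]['loss']:.4f} at step {steps[0]['step']}")
print(f"last  loss: {steps[-1]['loss']:.4f} at step {steps[-1]['step']}")
print(f"train_runtime (s): {summary.get('train_runtime')}")
```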