zephyr-7b-uf-rlced-conifer-dpo-2e / trainer_state.json
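A minimal sketch (not part of the checkpoint itself) of how one might inspect this log: it assumes the file below has been downloaded locally as "trainer_state.json" and that matplotlib is installed, then plots the DPO loss and reward margins recorded in "log_history".

import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only training-step records; evaluation records use "eval_*" keys instead of "loss".
train_logs = [rec for rec in state["log_history"] if "loss" in rec]

steps = [rec["step"] for rec in train_logs]
loss = [rec["loss"] for rec in train_logs]
margins = [rec["rewards/margins"] for rec in train_logs]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set(xlabel="step", ylabel="DPO loss")
ax2.plot(steps, margins)
ax2.set(xlabel="step", ylabel="rewards/margins")
fig.tight_layout()
plt.show()

The raw file contents follow.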
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9986120749479528,
"eval_steps": 1000,
"global_step": 1440,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013879250520471894,
"grad_norm": 7.310771886732375,
"learning_rate": 3.4722222222222217e-09,
"logits/chosen": -2.658149242401123,
"logits/rejected": -2.6729652881622314,
"logps/chosen": -310.6693115234375,
"logps/rejected": -336.3360595703125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.013879250520471894,
"grad_norm": 6.989456718606319,
"learning_rate": 3.472222222222222e-08,
"logits/chosen": -2.7207493782043457,
"logits/rejected": -2.678452968597412,
"logps/chosen": -329.1288146972656,
"logps/rejected": -334.9566650390625,
"loss": 0.6932,
"rewards/accuracies": 0.4236111044883728,
"rewards/chosen": -0.00011655675189103931,
"rewards/margins": -0.00021387077867984772,
"rewards/rejected": 9.731399040902033e-05,
"step": 10
},
{
"epoch": 0.027758501040943788,
"grad_norm": 7.28858517103727,
"learning_rate": 6.944444444444444e-08,
"logits/chosen": -2.690325975418091,
"logits/rejected": -2.6607680320739746,
"logps/chosen": -317.3731384277344,
"logps/rejected": -331.0643005371094,
"loss": 0.6928,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": 0.00018671144789550453,
"rewards/margins": 0.00033597589936107397,
"rewards/rejected": -0.00014926446601748466,
"step": 20
},
{
"epoch": 0.041637751561415685,
"grad_norm": 7.1293734693123785,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -2.6790878772735596,
"logits/rejected": -2.645470142364502,
"logps/chosen": -351.1817321777344,
"logps/rejected": -351.5977478027344,
"loss": 0.6914,
"rewards/accuracies": 0.690625011920929,
"rewards/chosen": 0.0023157999385148287,
"rewards/margins": 0.004029616713523865,
"rewards/rejected": -0.0017138172406703234,
"step": 30
},
{
"epoch": 0.055517002081887576,
"grad_norm": 7.102945109434136,
"learning_rate": 1.3888888888888888e-07,
"logits/chosen": -2.698319435119629,
"logits/rejected": -2.6377086639404297,
"logps/chosen": -352.13153076171875,
"logps/rejected": -337.7492370605469,
"loss": 0.6867,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.010404938831925392,
"rewards/margins": 0.012554061599075794,
"rewards/rejected": -0.0021491218358278275,
"step": 40
},
{
"epoch": 0.06939625260235947,
"grad_norm": 6.472817798536912,
"learning_rate": 1.736111111111111e-07,
"logits/chosen": -2.6700968742370605,
"logits/rejected": -2.656541347503662,
"logps/chosen": -325.9363708496094,
"logps/rejected": -359.7778625488281,
"loss": 0.6778,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 0.026792842894792557,
"rewards/margins": 0.031667567789554596,
"rewards/rejected": -0.004874727688729763,
"step": 50
},
{
"epoch": 0.08327550312283137,
"grad_norm": 7.812961434750235,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -2.7250404357910156,
"logits/rejected": -2.680410861968994,
"logps/chosen": -325.12628173828125,
"logps/rejected": -345.4599914550781,
"loss": 0.6575,
"rewards/accuracies": 0.815625011920929,
"rewards/chosen": 0.05238135904073715,
"rewards/margins": 0.06940947473049164,
"rewards/rejected": -0.017028113827109337,
"step": 60
},
{
"epoch": 0.09715475364330327,
"grad_norm": 8.252964521813333,
"learning_rate": 2.4305555555555555e-07,
"logits/chosen": -2.6680197715759277,
"logits/rejected": -2.6488897800445557,
"logps/chosen": -336.755615234375,
"logps/rejected": -372.3600158691406,
"loss": 0.6231,
"rewards/accuracies": 0.809374988079071,
"rewards/chosen": 0.04352443665266037,
"rewards/margins": 0.14054368436336517,
"rewards/rejected": -0.0970192551612854,
"step": 70
},
{
"epoch": 0.11103400416377515,
"grad_norm": 10.907322040862939,
"learning_rate": 2.7777777777777776e-07,
"logits/chosen": -2.6945643424987793,
"logits/rejected": -2.635817050933838,
"logps/chosen": -345.4059143066406,
"logps/rejected": -395.4034423828125,
"loss": 0.5649,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.05337335914373398,
"rewards/margins": 0.348626971244812,
"rewards/rejected": -0.402000367641449,
"step": 80
},
{
"epoch": 0.12491325468424705,
"grad_norm": 14.954457613666033,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.7117972373962402,
"logits/rejected": -2.659916877746582,
"logps/chosen": -357.34344482421875,
"logps/rejected": -424.046875,
"loss": 0.4833,
"rewards/accuracies": 0.815625011920929,
"rewards/chosen": -0.24065232276916504,
"rewards/margins": 0.6489619016647339,
"rewards/rejected": -0.8896142840385437,
"step": 90
},
{
"epoch": 0.13879250520471895,
"grad_norm": 18.8809663980661,
"learning_rate": 3.472222222222222e-07,
"logits/chosen": -2.691723108291626,
"logits/rejected": -2.676143169403076,
"logps/chosen": -429.1051330566406,
"logps/rejected": -508.2496032714844,
"loss": 0.4648,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": -0.836562991142273,
"rewards/margins": 0.7794925570487976,
"rewards/rejected": -1.6160557270050049,
"step": 100
},
{
"epoch": 0.15267175572519084,
"grad_norm": 17.153622207375392,
"learning_rate": 3.819444444444444e-07,
"logits/chosen": -2.7029433250427246,
"logits/rejected": -2.6785738468170166,
"logps/chosen": -463.09588623046875,
"logps/rejected": -551.4292602539062,
"loss": 0.4393,
"rewards/accuracies": 0.78125,
"rewards/chosen": -1.1502994298934937,
"rewards/margins": 0.9367601275444031,
"rewards/rejected": -2.087059736251831,
"step": 110
},
{
"epoch": 0.16655100624566274,
"grad_norm": 17.322428984395128,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.740248203277588,
"logits/rejected": -2.715928554534912,
"logps/chosen": -466.61614990234375,
"logps/rejected": -588.0739135742188,
"loss": 0.416,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -1.314789891242981,
"rewards/margins": 1.0913175344467163,
"rewards/rejected": -2.4061074256896973,
"step": 120
},
{
"epoch": 0.18043025676613464,
"grad_norm": 17.727320951964273,
"learning_rate": 4.513888888888889e-07,
"logits/chosen": -2.732884168624878,
"logits/rejected": -2.693039894104004,
"logps/chosen": -470.36712646484375,
"logps/rejected": -637.1500854492188,
"loss": 0.3922,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": -1.2835638523101807,
"rewards/margins": 1.5643529891967773,
"rewards/rejected": -2.847916841506958,
"step": 130
},
{
"epoch": 0.19430950728660654,
"grad_norm": 23.308808504878304,
"learning_rate": 4.861111111111111e-07,
"logits/chosen": -2.410512924194336,
"logits/rejected": -2.2582736015319824,
"logps/chosen": -466.9541931152344,
"logps/rejected": -714.3003540039062,
"loss": 0.355,
"rewards/accuracies": 0.846875011920929,
"rewards/chosen": -1.4372889995574951,
"rewards/margins": 2.2647476196289062,
"rewards/rejected": -3.7020363807678223,
"step": 140
},
{
"epoch": 0.2081887578070784,
"grad_norm": 15.792878048667099,
"learning_rate": 4.999735579817769e-07,
"logits/chosen": -1.8029206991195679,
"logits/rejected": -1.4160174131393433,
"logps/chosen": -489.02960205078125,
"logps/rejected": -732.9075927734375,
"loss": 0.3505,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.6447805166244507,
"rewards/margins": 2.2803826332092285,
"rewards/rejected": -3.9251632690429688,
"step": 150
},
{
"epoch": 0.2220680083275503,
"grad_norm": 27.767234596146235,
"learning_rate": 4.998119881260575e-07,
"logits/chosen": -1.729731559753418,
"logits/rejected": -1.0307753086090088,
"logps/chosen": -489.806396484375,
"logps/rejected": -750.17333984375,
"loss": 0.3419,
"rewards/accuracies": 0.809374988079071,
"rewards/chosen": -1.7482877969741821,
"rewards/margins": 2.432042121887207,
"rewards/rejected": -4.1803297996521,
"step": 160
},
{
"epoch": 0.2359472588480222,
"grad_norm": 16.170525819937215,
"learning_rate": 4.995036332451857e-07,
"logits/chosen": -1.9091663360595703,
"logits/rejected": -0.9273399114608765,
"logps/chosen": -495.46661376953125,
"logps/rejected": -758.0303955078125,
"loss": 0.3289,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": -1.4501034021377563,
"rewards/margins": 2.5703396797180176,
"rewards/rejected": -4.020443439483643,
"step": 170
},
{
"epoch": 0.2498265093684941,
"grad_norm": 19.933464291596422,
"learning_rate": 4.990486745229364e-07,
"logits/chosen": -1.4646329879760742,
"logits/rejected": -0.523992121219635,
"logps/chosen": -475.5484924316406,
"logps/rejected": -712.8101806640625,
"loss": 0.3238,
"rewards/accuracies": 0.8218749761581421,
"rewards/chosen": -1.4877039194107056,
"rewards/margins": 2.2666311264038086,
"rewards/rejected": -3.7543349266052246,
"step": 180
},
{
"epoch": 0.263705759888966,
"grad_norm": 13.788976405207086,
"learning_rate": 4.984473792848607e-07,
"logits/chosen": -1.0008184909820557,
"logits/rejected": 0.3863092064857483,
"logps/chosen": -459.58367919921875,
"logps/rejected": -734.6851196289062,
"loss": 0.3131,
"rewards/accuracies": 0.84375,
"rewards/chosen": -1.3406341075897217,
"rewards/margins": 2.7795493602752686,
"rewards/rejected": -4.120182991027832,
"step": 190
},
{
"epoch": 0.2775850104094379,
"grad_norm": 20.384550977715232,
"learning_rate": 4.977001008412112e-07,
"logits/chosen": -0.7814763188362122,
"logits/rejected": 0.5796900987625122,
"logps/chosen": -522.8018798828125,
"logps/rejected": -799.6663208007812,
"loss": 0.333,
"rewards/accuracies": 0.828125,
"rewards/chosen": -1.9407739639282227,
"rewards/margins": 2.682741165161133,
"rewards/rejected": -4.6235151290893555,
"step": 200
},
{
"epoch": 0.2914642609299098,
"grad_norm": 18.232716128762664,
"learning_rate": 4.968072782793435e-07,
"logits/chosen": -0.9184268116950989,
"logits/rejected": 0.4480651319026947,
"logps/chosen": -499.62127685546875,
"logps/rejected": -834.462890625,
"loss": 0.3089,
"rewards/accuracies": 0.846875011920929,
"rewards/chosen": -1.7784448862075806,
"rewards/margins": 3.2359156608581543,
"rewards/rejected": -5.0143609046936035,
"step": 210
},
{
"epoch": 0.3053435114503817,
"grad_norm": 19.076527739229473,
"learning_rate": 4.957694362057149e-07,
"logits/chosen": -0.9672171473503113,
"logits/rejected": 0.16848711669445038,
"logps/chosen": -560.64453125,
"logps/rejected": -855.81591796875,
"loss": 0.3149,
"rewards/accuracies": 0.846875011920929,
"rewards/chosen": -1.9354803562164307,
"rewards/margins": 3.0875911712646484,
"rewards/rejected": -5.0230712890625,
"step": 220
},
{
"epoch": 0.3192227619708536,
"grad_norm": 16.258599026908875,
"learning_rate": 4.945871844376368e-07,
"logits/chosen": -1.0649207830429077,
"logits/rejected": 0.31253132224082947,
"logps/chosen": -538.4542846679688,
"logps/rejected": -874.3626098632812,
"loss": 0.3188,
"rewards/accuracies": 0.8968750238418579,
"rewards/chosen": -1.9632246494293213,
"rewards/margins": 3.402304172515869,
"rewards/rejected": -5.365528583526611,
"step": 230
},
{
"epoch": 0.3331020124913255,
"grad_norm": 18.705612187457067,
"learning_rate": 4.932612176449559e-07,
"logits/chosen": -0.9302694201469421,
"logits/rejected": 0.36828285455703735,
"logps/chosen": -505.27606201171875,
"logps/rejected": -816.0523681640625,
"loss": 0.3025,
"rewards/accuracies": 0.859375,
"rewards/chosen": -1.567478895187378,
"rewards/margins": 2.9575300216674805,
"rewards/rejected": -4.525008678436279,
"step": 240
},
{
"epoch": 0.3469812630117974,
"grad_norm": 24.629743659903227,
"learning_rate": 4.917923149418791e-07,
"logits/chosen": 0.6533899307250977,
"logits/rejected": 1.9370393753051758,
"logps/chosen": -599.1996459960938,
"logps/rejected": -975.5636596679688,
"loss": 0.2998,
"rewards/accuracies": 0.84375,
"rewards/chosen": -2.498326301574707,
"rewards/margins": 3.716355562210083,
"rewards/rejected": -6.214681625366211,
"step": 250
},
{
"epoch": 0.3608605135322693,
"grad_norm": 17.05308334694298,
"learning_rate": 4.901813394291801e-07,
"logits/chosen": -0.2267475426197052,
"logits/rejected": 1.061628818511963,
"logps/chosen": -510.58990478515625,
"logps/rejected": -825.64892578125,
"loss": 0.3068,
"rewards/accuracies": 0.8343750238418579,
"rewards/chosen": -1.8009761571884155,
"rewards/margins": 3.0452382564544678,
"rewards/rejected": -4.846214771270752,
"step": 260
},
{
"epoch": 0.3747397640527412,
"grad_norm": 16.458651892780324,
"learning_rate": 4.884292376870567e-07,
"logits/chosen": -0.5495095252990723,
"logits/rejected": 0.9298044443130493,
"logps/chosen": -518.9110107421875,
"logps/rejected": -838.7022705078125,
"loss": 0.3019,
"rewards/accuracies": 0.8218749761581421,
"rewards/chosen": -1.7417328357696533,
"rewards/margins": 3.202885866165161,
"rewards/rejected": -4.944618225097656,
"step": 270
},
{
"epoch": 0.3886190145732131,
"grad_norm": 17.412372040942056,
"learning_rate": 4.865370392189376e-07,
"logits/chosen": -1.044090986251831,
"logits/rejected": 0.6990992426872253,
"logps/chosen": -521.0147094726562,
"logps/rejected": -908.5431518554688,
"loss": 0.3086,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -1.7731910943984985,
"rewards/margins": 3.855863571166992,
"rewards/rejected": -5.629055023193359,
"step": 280
},
{
"epoch": 0.4024982650936849,
"grad_norm": 18.962541699001296,
"learning_rate": 4.845058558465645e-07,
"logits/chosen": 0.2687569260597229,
"logits/rejected": 1.7490098476409912,
"logps/chosen": -562.8821411132812,
"logps/rejected": -911.21142578125,
"loss": 0.2941,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.1526246070861816,
"rewards/margins": 3.554647445678711,
"rewards/rejected": -5.707272052764893,
"step": 290
},
{
"epoch": 0.4163775156141568,
"grad_norm": 18.283817174579646,
"learning_rate": 4.823368810567056e-07,
"logits/chosen": 0.27320951223373413,
"logits/rejected": 1.6727936267852783,
"logps/chosen": -508.94744873046875,
"logps/rejected": -807.9446411132812,
"loss": 0.3081,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -1.861353874206543,
"rewards/margins": 3.0898642539978027,
"rewards/rejected": -4.951218605041504,
"step": 300
},
{
"epoch": 0.4302567661346287,
"grad_norm": 15.422967764865048,
"learning_rate": 4.800313892998847e-07,
"logits/chosen": -0.3293236196041107,
"logits/rejected": 1.3189566135406494,
"logps/chosen": -509.6499938964844,
"logps/rejected": -856.3865966796875,
"loss": 0.2919,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.683760404586792,
"rewards/margins": 3.3120670318603516,
"rewards/rejected": -4.995827674865723,
"step": 310
},
{
"epoch": 0.4441360166551006,
"grad_norm": 18.804961569512887,
"learning_rate": 4.775907352415367e-07,
"logits/chosen": 0.22868101298809052,
"logits/rejected": 1.9417445659637451,
"logps/chosen": -556.9364013671875,
"logps/rejected": -950.5734252929688,
"loss": 0.2808,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -1.9100916385650635,
"rewards/margins": 4.022894859313965,
"rewards/rejected": -5.932986259460449,
"step": 320
},
{
"epoch": 0.4580152671755725,
"grad_norm": 17.099019764148846,
"learning_rate": 4.7501635296603025e-07,
"logits/chosen": 1.049659013748169,
"logits/rejected": 2.4073307514190674,
"logps/chosen": -531.3202514648438,
"logps/rejected": -922.6915893554688,
"loss": 0.292,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.8814361095428467,
"rewards/margins": 3.822434902191162,
"rewards/rejected": -5.7038702964782715,
"step": 330
},
{
"epoch": 0.4718945176960444,
"grad_norm": 18.898740164063316,
"learning_rate": 4.723097551340265e-07,
"logits/chosen": 1.2799204587936401,
"logits/rejected": 2.3121683597564697,
"logps/chosen": -494.0704040527344,
"logps/rejected": -854.8629760742188,
"loss": 0.292,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": -1.7092489004135132,
"rewards/margins": 3.4398162364959717,
"rewards/rejected": -5.149065971374512,
"step": 340
},
{
"epoch": 0.4857737682165163,
"grad_norm": 20.5834993963944,
"learning_rate": 4.6947253209366613e-07,
"logits/chosen": 1.0228136777877808,
"logits/rejected": 2.397416114807129,
"logps/chosen": -545.7423706054688,
"logps/rejected": -912.7175903320312,
"loss": 0.2827,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.025628089904785,
"rewards/margins": 3.6154162883758545,
"rewards/rejected": -5.6410441398620605,
"step": 350
},
{
"epoch": 0.4996530187369882,
"grad_norm": 14.253483616771307,
"learning_rate": 4.6650635094610966e-07,
"logits/chosen": 1.0244512557983398,
"logits/rejected": 2.2805395126342773,
"logps/chosen": -517.4966430664062,
"logps/rejected": -885.3656005859375,
"loss": 0.2767,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -1.9133589267730713,
"rewards/margins": 3.5970351696014404,
"rewards/rejected": -5.510394096374512,
"step": 360
},
{
"epoch": 0.5135322692574601,
"grad_norm": 23.570388915292295,
"learning_rate": 4.6341295456597906e-07,
"logits/chosen": 0.6538206338882446,
"logits/rejected": 1.800736665725708,
"logps/chosen": -486.4225158691406,
"logps/rejected": -818.6160278320312,
"loss": 0.2972,
"rewards/accuracies": 0.840624988079071,
"rewards/chosen": -1.574295997619629,
"rewards/margins": 3.247091770172119,
"rewards/rejected": -4.821388244628906,
"step": 370
},
{
"epoch": 0.527411519777932,
"grad_norm": 22.92056805222348,
"learning_rate": 4.6019416057727577e-07,
"logits/chosen": 0.5617297291755676,
"logits/rejected": 1.9416172504425049,
"logps/chosen": -554.2542724609375,
"logps/rejected": -1017.8567504882812,
"loss": 0.2817,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.9359180927276611,
"rewards/margins": 4.510493278503418,
"rewards/rejected": -6.4464111328125,
"step": 380
},
{
"epoch": 0.5412907702984039,
"grad_norm": 21.96046117437538,
"learning_rate": 4.5685186028537756e-07,
"logits/chosen": 0.08615957945585251,
"logits/rejected": 1.9250423908233643,
"logps/chosen": -521.3770751953125,
"logps/rejected": -1026.9183349609375,
"loss": 0.2752,
"rewards/accuracies": 0.903124988079071,
"rewards/chosen": -1.688780426979065,
"rewards/margins": 4.929998397827148,
"rewards/rejected": -6.618779182434082,
"step": 390
},
{
"epoch": 0.5551700208188758,
"grad_norm": 16.35709631938306,
"learning_rate": 4.5338801756574185e-07,
"logits/chosen": 0.8631726503372192,
"logits/rejected": 2.426805019378662,
"logps/chosen": -550.8397216796875,
"logps/rejected": -960.1754760742188,
"loss": 0.2975,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.955361008644104,
"rewards/margins": 4.027670860290527,
"rewards/rejected": -5.983031749725342,
"step": 400
},
{
"epoch": 0.5690492713393477,
"grad_norm": 14.715456208460227,
"learning_rate": 4.498046677099674e-07,
"logits/chosen": 0.137635737657547,
"logits/rejected": 1.9270877838134766,
"logps/chosen": -470.42767333984375,
"logps/rejected": -795.9421997070312,
"loss": 0.2826,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -1.4737566709518433,
"rewards/margins": 3.3337607383728027,
"rewards/rejected": -4.807517051696777,
"step": 410
},
{
"epoch": 0.5829285218598196,
"grad_norm": 15.380893236552625,
"learning_rate": 4.461039162298939e-07,
"logits/chosen": 1.0668681859970093,
"logits/rejected": 2.6232194900512695,
"logps/chosen": -546.1395874023438,
"logps/rejected": -970.1839599609375,
"loss": 0.271,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": -1.9840848445892334,
"rewards/margins": 4.236409664154053,
"rewards/rejected": -6.220494270324707,
"step": 420
},
{
"epoch": 0.5968077723802915,
"grad_norm": 20.12909286909629,
"learning_rate": 4.4228793762044126e-07,
"logits/chosen": 1.8645107746124268,
"logits/rejected": 3.282402515411377,
"logps/chosen": -556.1341552734375,
"logps/rejected": -999.4012451171875,
"loss": 0.286,
"rewards/accuracies": 0.859375,
"rewards/chosen": -2.260993719100952,
"rewards/margins": 4.435044288635254,
"rewards/rejected": -6.696038246154785,
"step": 430
},
{
"epoch": 0.6106870229007634,
"grad_norm": 16.373631758502654,
"learning_rate": 4.3835897408191513e-07,
"logits/chosen": 1.3662347793579102,
"logits/rejected": 2.8811306953430176,
"logps/chosen": -512.532958984375,
"logps/rejected": -906.1589965820312,
"loss": 0.2862,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.7359873056411743,
"rewards/margins": 3.9433860778808594,
"rewards/rejected": -5.679372787475586,
"step": 440
},
{
"epoch": 0.6245662734212353,
"grad_norm": 20.352147452552153,
"learning_rate": 4.34319334202531e-07,
"logits/chosen": 1.8553016185760498,
"logits/rejected": 3.245082139968872,
"logps/chosen": -553.4808349609375,
"logps/rejected": -1043.968505859375,
"loss": 0.2671,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -2.185727834701538,
"rewards/margins": 4.84250545501709,
"rewards/rejected": -7.028233528137207,
"step": 450
},
{
"epoch": 0.6384455239417072,
"grad_norm": 14.530740198443842,
"learning_rate": 4.301713916019286e-07,
"logits/chosen": 1.8998327255249023,
"logits/rejected": 3.121591091156006,
"logps/chosen": -545.904296875,
"logps/rejected": -980.6796875,
"loss": 0.277,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.9527307748794556,
"rewards/margins": 4.239319324493408,
"rewards/rejected": -6.192049980163574,
"step": 460
},
{
"epoch": 0.6523247744621791,
"grad_norm": 14.543324773787445,
"learning_rate": 4.2591758353647643e-07,
"logits/chosen": 1.0112075805664062,
"logits/rejected": 2.648533821105957,
"logps/chosen": -551.0354614257812,
"logps/rejected": -958.7569580078125,
"loss": 0.2778,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": -1.892492651939392,
"rewards/margins": 4.073439598083496,
"rewards/rejected": -5.965932846069336,
"step": 470
},
{
"epoch": 0.666204024982651,
"grad_norm": 20.665751040711644,
"learning_rate": 4.2156040946718343e-07,
"logits/chosen": 1.5330281257629395,
"logits/rejected": 3.1015706062316895,
"logps/chosen": -588.2686157226562,
"logps/rejected": -1030.510498046875,
"loss": 0.2843,
"rewards/accuracies": 0.878125011920929,
"rewards/chosen": -2.3880248069763184,
"rewards/margins": 4.504977226257324,
"rewards/rejected": -6.893001556396484,
"step": 480
},
{
"epoch": 0.6800832755031229,
"grad_norm": 15.05601087138497,
"learning_rate": 4.1710242959106056e-07,
"logits/chosen": 0.5176582336425781,
"logits/rejected": 2.4557127952575684,
"logps/chosen": -507.008544921875,
"logps/rejected": -886.51416015625,
"loss": 0.294,
"rewards/accuracies": 0.846875011920929,
"rewards/chosen": -1.5845837593078613,
"rewards/margins": 3.810807704925537,
"rewards/rejected": -5.395391464233398,
"step": 490
},
{
"epoch": 0.6939625260235948,
"grad_norm": 16.718450087742482,
"learning_rate": 4.125462633367959e-07,
"logits/chosen": 1.4314930438995361,
"logits/rejected": 2.9996209144592285,
"logps/chosen": -532.1755981445312,
"logps/rejected": -1021.0213623046875,
"loss": 0.266,
"rewards/accuracies": 0.878125011920929,
"rewards/chosen": -2.0497827529907227,
"rewards/margins": 4.718562126159668,
"rewards/rejected": -6.768344879150391,
"step": 500
},
{
"epoch": 0.7078417765440667,
"grad_norm": 19.85458205442182,
"learning_rate": 4.0789458782562435e-07,
"logits/chosen": 1.293492317199707,
"logits/rejected": 2.812124013900757,
"logps/chosen": -551.9437866210938,
"logps/rejected": -1100.38720703125,
"loss": 0.2609,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.3401436805725098,
"rewards/margins": 5.148820400238037,
"rewards/rejected": -7.4889631271362305,
"step": 510
},
{
"epoch": 0.7217210270645386,
"grad_norm": 14.477360180818952,
"learning_rate": 4.031501362983007e-07,
"logits/chosen": 0.7156480550765991,
"logits/rejected": 2.6717166900634766,
"logps/chosen": -510.4054260253906,
"logps/rejected": -1019.923828125,
"loss": 0.2805,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -1.7749239206314087,
"rewards/margins": 4.909750938415527,
"rewards/rejected": -6.6846747398376465,
"step": 520
},
{
"epoch": 0.7356002775850105,
"grad_norm": 17.058981872372126,
"learning_rate": 3.9831569650909553e-07,
"logits/chosen": 0.01914766989648342,
"logits/rejected": 2.122251510620117,
"logps/chosen": -551.1067504882812,
"logps/rejected": -933.5851440429688,
"loss": 0.2736,
"rewards/accuracies": 0.846875011920929,
"rewards/chosen": -2.0334556102752686,
"rewards/margins": 3.904972791671753,
"rewards/rejected": -5.9384284019470215,
"step": 530
},
{
"epoch": 0.7494795281054824,
"grad_norm": 19.755034017893955,
"learning_rate": 3.933941090877615e-07,
"logits/chosen": -0.34956273436546326,
"logits/rejected": 2.1510236263275146,
"logps/chosen": -553.6760864257812,
"logps/rejected": -1035.0694580078125,
"loss": 0.2598,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.195675849914551,
"rewards/margins": 4.830143928527832,
"rewards/rejected": -7.025820732116699,
"step": 540
},
{
"epoch": 0.7633587786259542,
"grad_norm": 15.194278158742483,
"learning_rate": 3.883882658704306e-07,
"logits/chosen": -0.1008995771408081,
"logits/rejected": 2.1417980194091797,
"logps/chosen": -562.4782104492188,
"logps/rejected": -1068.9832763671875,
"loss": 0.2769,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": -2.3539323806762695,
"rewards/margins": 5.010300636291504,
"rewards/rejected": -7.364233493804932,
"step": 550
},
{
"epoch": 0.7772380291464261,
"grad_norm": 17.553015958549693,
"learning_rate": 3.833011082004228e-07,
"logits/chosen": -0.8681015968322754,
"logits/rejected": 1.4696462154388428,
"logps/chosen": -573.9002685546875,
"logps/rejected": -1092.654296875,
"loss": 0.2772,
"rewards/accuracies": 0.828125,
"rewards/chosen": -2.370056390762329,
"rewards/margins": 5.011393070220947,
"rewards/rejected": -7.381448268890381,
"step": 560
},
{
"epoch": 0.7911172796668979,
"grad_norm": 15.00893179617878,
"learning_rate": 3.781356251999663e-07,
"logits/chosen": -1.1506744623184204,
"logits/rejected": 0.8982523679733276,
"logps/chosen": -535.85595703125,
"logps/rejected": -979.8224487304688,
"loss": 0.2904,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.1066572666168213,
"rewards/margins": 4.214187145233154,
"rewards/rejected": -6.320844650268555,
"step": 570
},
{
"epoch": 0.8049965301873698,
"grad_norm": 17.833191557593317,
"learning_rate": 3.728948520138426e-07,
"logits/chosen": -0.45408257842063904,
"logits/rejected": 1.915353775024414,
"logps/chosen": -540.576171875,
"logps/rejected": -994.28564453125,
"loss": 0.2725,
"rewards/accuracies": 0.859375,
"rewards/chosen": -2.1601402759552,
"rewards/margins": 4.492788314819336,
"rewards/rejected": -6.652928829193115,
"step": 580
},
{
"epoch": 0.8188757807078417,
"grad_norm": 16.37664073810551,
"learning_rate": 3.6758186802599064e-07,
"logits/chosen": 0.19857454299926758,
"logits/rejected": 2.2812840938568115,
"logps/chosen": -550.7555541992188,
"logps/rejected": -1052.941162109375,
"loss": 0.2681,
"rewards/accuracies": 0.878125011920929,
"rewards/chosen": -2.186647415161133,
"rewards/margins": 4.857481956481934,
"rewards/rejected": -7.044129848480225,
"step": 590
},
{
"epoch": 0.8327550312283136,
"grad_norm": 15.489252159692187,
"learning_rate": 3.6219979505011555e-07,
"logits/chosen": 0.13169285655021667,
"logits/rejected": 2.4862585067749023,
"logps/chosen": -545.8336181640625,
"logps/rejected": -1015.2984619140625,
"loss": 0.2599,
"rewards/accuracies": 0.8843749761581421,
"rewards/chosen": -2.0449271202087402,
"rewards/margins": 4.806704521179199,
"rewards/rejected": -6.851631164550781,
"step": 600
},
{
"epoch": 0.8466342817487855,
"grad_norm": 16.034098237394367,
"learning_rate": 3.5675179549536786e-07,
"logits/chosen": 1.026650309562683,
"logits/rejected": 2.891331672668457,
"logps/chosen": -562.756103515625,
"logps/rejected": -1112.7884521484375,
"loss": 0.2655,
"rewards/accuracies": 0.871874988079071,
"rewards/chosen": -2.344050884246826,
"rewards/margins": 5.390646934509277,
"rewards/rejected": -7.7346978187561035,
"step": 610
},
{
"epoch": 0.8605135322692574,
"grad_norm": 15.964353907588587,
"learning_rate": 3.512410705081684e-07,
"logits/chosen": 0.5978251695632935,
"logits/rejected": 2.6756691932678223,
"logps/chosen": -596.0946655273438,
"logps/rejected": -1164.9000244140625,
"loss": 0.2654,
"rewards/accuracies": 0.890625,
"rewards/chosen": -2.3005499839782715,
"rewards/margins": 5.751313209533691,
"rewards/rejected": -8.051862716674805,
"step": 620
},
{
"epoch": 0.8743927827897293,
"grad_norm": 15.534990200131912,
"learning_rate": 3.4567085809127245e-07,
"logits/chosen": 0.7654634714126587,
"logits/rejected": 2.563817262649536,
"logps/chosen": -553.0380859375,
"logps/rejected": -1015.8709106445312,
"loss": 0.2837,
"rewards/accuracies": 0.8343750238418579,
"rewards/chosen": -2.105071544647217,
"rewards/margins": 4.519524097442627,
"rewards/rejected": -6.624594688415527,
"step": 630
},
{
"epoch": 0.8882720333102012,
"grad_norm": 15.473999576589652,
"learning_rate": 3.400444312011776e-07,
"logits/chosen": 1.6810909509658813,
"logits/rejected": 3.29026460647583,
"logps/chosen": -547.7821044921875,
"logps/rejected": -979.19580078125,
"loss": 0.2808,
"rewards/accuracies": 0.871874988079071,
"rewards/chosen": -2.2526745796203613,
"rewards/margins": 4.376776695251465,
"rewards/rejected": -6.629450798034668,
"step": 640
},
{
"epoch": 0.9021512838306731,
"grad_norm": 18.65677766855292,
"learning_rate": 3.343650958249935e-07,
"logits/chosen": 1.9508390426635742,
"logits/rejected": 3.384152889251709,
"logps/chosen": -611.1751708984375,
"logps/rejected": -1097.154541015625,
"loss": 0.2717,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.623706340789795,
"rewards/margins": 4.866621494293213,
"rewards/rejected": -7.49032735824585,
"step": 650
},
{
"epoch": 0.916030534351145,
"grad_norm": 15.172341090722101,
"learning_rate": 3.286361890379034e-07,
"logits/chosen": 1.418536901473999,
"logits/rejected": 2.9168481826782227,
"logps/chosen": -542.9044799804688,
"logps/rejected": -998.8190307617188,
"loss": 0.2649,
"rewards/accuracies": 0.878125011920929,
"rewards/chosen": -2.128884792327881,
"rewards/margins": 4.480809688568115,
"rewards/rejected": -6.6096954345703125,
"step": 660
},
{
"epoch": 0.9299097848716169,
"grad_norm": 21.03897492647871,
"learning_rate": 3.2286107704235875e-07,
"logits/chosen": 0.6511715650558472,
"logits/rejected": 2.4751362800598145,
"logps/chosen": -516.9407348632812,
"logps/rejected": -994.5736083984375,
"loss": 0.2687,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.8717918395996094,
"rewards/margins": 4.618001937866211,
"rewards/rejected": -6.489793300628662,
"step": 670
},
{
"epoch": 0.9437890353920888,
"grad_norm": 23.147160211659624,
"learning_rate": 3.1704315319015936e-07,
"logits/chosen": 0.42685967683792114,
"logits/rejected": 2.4200854301452637,
"logps/chosen": -569.0222778320312,
"logps/rejected": -1023.0499877929688,
"loss": 0.2637,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -2.241736650466919,
"rewards/margins": 4.569226264953613,
"rewards/rejected": -6.8109636306762695,
"step": 680
},
{
"epoch": 0.9576682859125607,
"grad_norm": 19.074650599712797,
"learning_rate": 3.1118583598858094e-07,
"logits/chosen": 0.3251928687095642,
"logits/rejected": 2.361527442932129,
"logps/chosen": -536.3818969726562,
"logps/rejected": -1084.29150390625,
"loss": 0.269,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.159126043319702,
"rewards/margins": 5.291468620300293,
"rewards/rejected": -7.450594425201416,
"step": 690
},
{
"epoch": 0.9715475364330326,
"grad_norm": 16.46319504999634,
"learning_rate": 3.052925670917219e-07,
"logits/chosen": 0.3188991844654083,
"logits/rejected": 2.1961517333984375,
"logps/chosen": -590.7283935546875,
"logps/rejected": -1070.7264404296875,
"loss": 0.2754,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -2.466348171234131,
"rewards/margins": 4.865278720855713,
"rewards/rejected": -7.33162784576416,
"step": 700
},
{
"epoch": 0.9854267869535045,
"grad_norm": 22.275614803003936,
"learning_rate": 2.9936680927824935e-07,
"logits/chosen": 0.7669705152511597,
"logits/rejected": 2.4150214195251465,
"logps/chosen": -574.384033203125,
"logps/rejected": -1084.837646484375,
"loss": 0.2543,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -2.393491268157959,
"rewards/margins": 5.025046348571777,
"rewards/rejected": -7.4185380935668945,
"step": 710
},
{
"epoch": 0.9993060374739764,
"grad_norm": 22.856869601641986,
"learning_rate": 2.934120444167326e-07,
"logits/chosen": 0.018332133069634438,
"logits/rejected": 1.9890985488891602,
"logps/chosen": -554.123291015625,
"logps/rejected": -1072.498046875,
"loss": 0.2651,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": -2.201577663421631,
"rewards/margins": 5.162604331970215,
"rewards/rejected": -7.3641815185546875,
"step": 720
},
{
"epoch": 1.0131852879944483,
"grad_norm": 16.323336520132766,
"learning_rate": 2.8743177141975993e-07,
"logits/chosen": 0.2875978350639343,
"logits/rejected": 2.0843756198883057,
"logps/chosen": -598.0647583007812,
"logps/rejected": -1214.6568603515625,
"loss": 0.1848,
"rewards/accuracies": 0.921875,
"rewards/chosen": -2.478050708770752,
"rewards/margins": 6.1301589012146,
"rewards/rejected": -8.608209609985352,
"step": 730
},
{
"epoch": 1.0270645385149202,
"grad_norm": 17.717617068699322,
"learning_rate": 2.814295041880407e-07,
"logits/chosen": 1.5749400854110718,
"logits/rejected": 3.2800605297088623,
"logps/chosen": -720.4405517578125,
"logps/rejected": -1425.5921630859375,
"loss": 0.1738,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": -3.6376540660858154,
"rewards/margins": 6.913857460021973,
"rewards/rejected": -10.55151081085205,
"step": 740
},
{
"epoch": 1.040943789035392,
"grad_norm": 16.138966847361562,
"learning_rate": 2.754087695457005e-07,
"logits/chosen": 1.3446929454803467,
"logits/rejected": 3.647613525390625,
"logps/chosen": -660.2525024414062,
"logps/rejected": -1274.7076416015625,
"loss": 0.1695,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.0320277214050293,
"rewards/margins": 6.136539936065674,
"rewards/rejected": -9.168566703796387,
"step": 750
},
{
"epoch": 1.054823039555864,
"grad_norm": 15.364782345427345,
"learning_rate": 2.6937310516798275e-07,
"logits/chosen": 1.2250503301620483,
"logits/rejected": 3.3649864196777344,
"logps/chosen": -629.1475830078125,
"logps/rejected": -1302.7933349609375,
"loss": 0.1799,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.8058159351348877,
"rewards/margins": 6.627597808837891,
"rewards/rejected": -9.4334135055542,
"step": 760
},
{
"epoch": 1.0687022900763359,
"grad_norm": 18.530915715706502,
"learning_rate": 2.6332605750257456e-07,
"logits/chosen": 0.9841400384902954,
"logits/rejected": 3.341764450073242,
"logps/chosen": -656.33251953125,
"logps/rejected": -1349.439697265625,
"loss": 0.1607,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.064764976501465,
"rewards/margins": 6.942727565765381,
"rewards/rejected": -10.007492065429688,
"step": 770
},
{
"epoch": 1.0825815405968078,
"grad_norm": 18.346652449927596,
"learning_rate": 2.5727117968577785e-07,
"logits/chosen": 1.088732361793518,
"logits/rejected": 3.094304323196411,
"logps/chosen": -656.88134765625,
"logps/rejected": -1397.916259765625,
"loss": 0.1514,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -3.20575213432312,
"rewards/margins": 7.160604000091553,
"rewards/rejected": -10.366357803344727,
"step": 780
},
{
"epoch": 1.0964607911172797,
"grad_norm": 17.314542797248876,
"learning_rate": 2.5121202945475043e-07,
"logits/chosen": 1.5640583038330078,
"logits/rejected": 3.803560256958008,
"logps/chosen": -630.0179443359375,
"logps/rejected": -1349.983642578125,
"loss": 0.178,
"rewards/accuracies": 0.909375011920929,
"rewards/chosen": -2.848733425140381,
"rewards/margins": 7.226857662200928,
"rewards/rejected": -10.075590133666992,
"step": 790
},
{
"epoch": 1.1103400416377516,
"grad_norm": 14.955502239768675,
"learning_rate": 2.4515216705704393e-07,
"logits/chosen": 1.8664920330047607,
"logits/rejected": 3.7396292686462402,
"logps/chosen": -608.3690795898438,
"logps/rejected": -1366.815673828125,
"loss": 0.1637,
"rewards/accuracies": 0.953125,
"rewards/chosen": -2.7840514183044434,
"rewards/margins": 7.429004669189453,
"rewards/rejected": -10.213056564331055,
"step": 800
},
{
"epoch": 1.1242192921582235,
"grad_norm": 18.71030295389215,
"learning_rate": 2.39095153158666e-07,
"logits/chosen": 1.694549560546875,
"logits/rejected": 3.747912645339966,
"logps/chosen": -657.016845703125,
"logps/rejected": -1396.2716064453125,
"loss": 0.1581,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.1420130729675293,
"rewards/margins": 7.294529914855957,
"rewards/rejected": -10.436542510986328,
"step": 810
},
{
"epoch": 1.1380985426786954,
"grad_norm": 14.829984041140372,
"learning_rate": 2.330445467518977e-07,
"logits/chosen": 0.7256888747215271,
"logits/rejected": 2.9225716590881348,
"logps/chosen": -660.5631103515625,
"logps/rejected": -1347.63134765625,
"loss": 0.1619,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -3.208239793777466,
"rewards/margins": 6.785590171813965,
"rewards/rejected": -9.993829727172852,
"step": 820
},
{
"epoch": 1.1519777931991673,
"grad_norm": 22.137973289587368,
"learning_rate": 2.270039030640931e-07,
"logits/chosen": 1.578162431716919,
"logits/rejected": 3.47196888923645,
"logps/chosen": -628.019287109375,
"logps/rejected": -1320.9600830078125,
"loss": 0.1749,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.9641802310943604,
"rewards/margins": 6.771336555480957,
"rewards/rejected": -9.735516548156738,
"step": 830
},
{
"epoch": 1.1658570437196392,
"grad_norm": 19.216307710411925,
"learning_rate": 2.209767714686924e-07,
"logits/chosen": 1.5978351831436157,
"logits/rejected": 3.7053027153015137,
"logps/chosen": -653.354736328125,
"logps/rejected": -1406.637451171875,
"loss": 0.1606,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.2107303142547607,
"rewards/margins": 7.3466925621032715,
"rewards/rejected": -10.557422637939453,
"step": 840
},
{
"epoch": 1.179736294240111,
"grad_norm": 21.415640762520873,
"learning_rate": 2.1496669339967344e-07,
"logits/chosen": 1.8785479068756104,
"logits/rejected": 3.8594677448272705,
"logps/chosen": -668.572265625,
"logps/rejected": -1460.521240234375,
"loss": 0.1611,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": -3.3157646656036377,
"rewards/margins": 7.8289475440979,
"rewards/rejected": -11.144712448120117,
"step": 850
},
{
"epoch": 1.193615544760583,
"grad_norm": 19.68985482074639,
"learning_rate": 2.0897720027066897e-07,
"logits/chosen": 1.5298289060592651,
"logits/rejected": 3.88330078125,
"logps/chosen": -660.7781982421875,
"logps/rejected": -1415.609130859375,
"loss": 0.1637,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.227867603302002,
"rewards/margins": 7.6040802001953125,
"rewards/rejected": -10.831949234008789,
"step": 860
},
{
"epoch": 1.2074947952810549,
"grad_norm": 19.76316476703645,
"learning_rate": 2.0301181139997202e-07,
"logits/chosen": 1.5017510652542114,
"logits/rejected": 3.601763963699341,
"logps/chosen": -664.7364501953125,
"logps/rejected": -1365.153564453125,
"loss": 0.1716,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.1189022064208984,
"rewards/margins": 6.980868339538574,
"rewards/rejected": -10.099771499633789,
"step": 870
},
{
"epoch": 1.2213740458015268,
"grad_norm": 15.231393479477973,
"learning_rate": 1.970740319426474e-07,
"logits/chosen": 2.0277457237243652,
"logits/rejected": 3.8836147785186768,
"logps/chosen": -615.454833984375,
"logps/rejected": -1219.456298828125,
"loss": 0.1803,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": -2.8338332176208496,
"rewards/margins": 5.970818519592285,
"rewards/rejected": -8.804651260375977,
"step": 880
},
{
"epoch": 1.2352532963219987,
"grad_norm": 23.68508206139152,
"learning_rate": 1.911673508309656e-07,
"logits/chosen": 1.9813998937606812,
"logits/rejected": 3.9024734497070312,
"logps/chosen": -628.2916259765625,
"logps/rejected": -1264.0699462890625,
"loss": 0.1643,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -2.8454182147979736,
"rewards/margins": 6.396851539611816,
"rewards/rejected": -9.242269515991211,
"step": 890
},
{
"epoch": 1.2491325468424705,
"grad_norm": 19.952403887659834,
"learning_rate": 1.8529523872436977e-07,
"logits/chosen": 1.032080054283142,
"logits/rejected": 3.210120677947998,
"logps/chosen": -671.1250610351562,
"logps/rejected": -1549.0615234375,
"loss": 0.16,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -3.1327102184295654,
"rewards/margins": 8.625667572021484,
"rewards/rejected": -11.758378982543945,
"step": 900
},
{
"epoch": 1.2630117973629424,
"grad_norm": 22.651112148813382,
"learning_rate": 1.7946114597017808e-07,
"logits/chosen": 1.6984176635742188,
"logits/rejected": 3.7526144981384277,
"logps/chosen": -660.7144165039062,
"logps/rejected": -1447.436767578125,
"loss": 0.1619,
"rewards/accuracies": 0.940625011920929,
"rewards/chosen": -3.2229180335998535,
"rewards/margins": 7.7218732833862305,
"rewards/rejected": -10.944790840148926,
"step": 910
},
{
"epoch": 1.2768910478834143,
"grad_norm": 22.126302318317872,
"learning_rate": 1.7366850057622172e-07,
"logits/chosen": 1.6554454565048218,
"logits/rejected": 3.758930206298828,
"logps/chosen": -654.619140625,
"logps/rejected": -1429.7637939453125,
"loss": 0.1653,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": -3.282761812210083,
"rewards/margins": 7.560622215270996,
"rewards/rejected": -10.8433837890625,
"step": 920
},
{
"epoch": 1.2907702984038862,
"grad_norm": 17.175640354603786,
"learning_rate": 1.6792070619660974e-07,
"logits/chosen": 1.6677768230438232,
"logits/rejected": 3.8167777061462402,
"logps/chosen": -654.5197143554688,
"logps/rejected": -1449.1417236328125,
"loss": 0.1654,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.2044639587402344,
"rewards/margins": 7.7717132568359375,
"rewards/rejected": -10.976176261901855,
"step": 930
},
{
"epoch": 1.3046495489243581,
"grad_norm": 19.342272367773113,
"learning_rate": 1.622211401318028e-07,
"logits/chosen": 1.567354440689087,
"logits/rejected": 4.129142761230469,
"logps/chosen": -657.1278686523438,
"logps/rejected": -1453.904541015625,
"loss": 0.1595,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.066843271255493,
"rewards/margins": 7.886987209320068,
"rewards/rejected": -10.953829765319824,
"step": 940
},
{
"epoch": 1.31852879944483,
"grad_norm": 23.040092913699233,
"learning_rate": 1.5657315134417244e-07,
"logits/chosen": 1.8149755001068115,
"logits/rejected": 4.2323408126831055,
"logps/chosen": -688.6448364257812,
"logps/rejected": -1585.87841796875,
"loss": 0.1665,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.4780020713806152,
"rewards/margins": 8.797907829284668,
"rewards/rejected": -12.275908470153809,
"step": 950
},
{
"epoch": 1.332408049965302,
"grad_norm": 17.336766543398397,
"learning_rate": 1.5098005849021078e-07,
"logits/chosen": 1.7025401592254639,
"logits/rejected": 4.534233570098877,
"logps/chosen": -703.167724609375,
"logps/rejected": -1630.665283203125,
"loss": 0.1672,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": -3.5511913299560547,
"rewards/margins": 9.27750301361084,
"rewards/rejected": -12.828694343566895,
"step": 960
},
{
"epoch": 1.3462873004857738,
"grad_norm": 20.603249005514577,
"learning_rate": 1.454451479705484e-07,
"logits/chosen": 2.531970500946045,
"logits/rejected": 4.76376485824585,
"logps/chosen": -658.04931640625,
"logps/rejected": -1445.8599853515625,
"loss": 0.1611,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.303406238555908,
"rewards/margins": 7.701096534729004,
"rewards/rejected": -11.00450325012207,
"step": 970
},
{
"epoch": 1.3601665510062457,
"grad_norm": 26.637520706621387,
"learning_rate": 1.3997167199892385e-07,
"logits/chosen": 2.412917375564575,
"logits/rejected": 4.9495530128479,
"logps/chosen": -706.7518310546875,
"logps/rejected": -1550.7310791015625,
"loss": 0.16,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.5566086769104004,
"rewards/margins": 8.430598258972168,
"rewards/rejected": -11.987205505371094,
"step": 980
},
{
"epoch": 1.3740458015267176,
"grad_norm": 22.552075685705777,
"learning_rate": 1.3456284669124157e-07,
"logits/chosen": 2.5810890197753906,
"logits/rejected": 4.767507076263428,
"logps/chosen": -692.7928466796875,
"logps/rejected": -1483.0191650390625,
"loss": 0.1661,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.5196609497070312,
"rewards/margins": 7.722577095031738,
"rewards/rejected": -11.24223804473877,
"step": 990
},
{
"epoch": 1.3879250520471895,
"grad_norm": 18.63062778053758,
"learning_rate": 1.2922185017584036e-07,
"logits/chosen": 2.9480197429656982,
"logits/rejected": 5.796414375305176,
"logps/chosen": -692.9706420898438,
"logps/rejected": -1486.6146240234375,
"loss": 0.1587,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.4646847248077393,
"rewards/margins": 7.979840278625488,
"rewards/rejected": -11.444524765014648,
"step": 1000
},
{
"epoch": 1.3879250520471895,
"eval_logits/chosen": 2.4397730827331543,
"eval_logits/rejected": 4.560609817504883,
"eval_logps/chosen": -779.3055419921875,
"eval_logps/rejected": -1613.5015869140625,
"eval_loss": 0.24712695181369781,
"eval_rewards/accuracies": 0.8909774422645569,
"eval_rewards/chosen": -3.947213649749756,
"eval_rewards/margins": 8.086578369140625,
"eval_rewards/rejected": -12.033791542053223,
"eval_runtime": 385.5049,
"eval_samples_per_second": 22.026,
"eval_steps_per_second": 0.345,
"step": 1000
},
{
"epoch": 1.4018043025676614,
"grad_norm": 17.720503294892175,
"learning_rate": 1.2395182072608245e-07,
"logits/chosen": 2.783371925354004,
"logits/rejected": 5.293555736541748,
"logps/chosen": -693.8554077148438,
"logps/rejected": -1455.6026611328125,
"loss": 0.1578,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -3.711820125579834,
"rewards/margins": 7.577448844909668,
"rewards/rejected": -11.289270401000977,
"step": 1010
},
{
"epoch": 1.4156835530881333,
"grad_norm": 39.790575003507485,
"learning_rate": 1.1875585491635998e-07,
"logits/chosen": 3.120913028717041,
"logits/rejected": 5.793875217437744,
"logps/chosen": -757.794921875,
"logps/rejected": -1674.984375,
"loss": 0.1529,
"rewards/accuracies": 0.9375,
"rewards/chosen": -4.312003135681152,
"rewards/margins": 8.977596282958984,
"rewards/rejected": -13.289599418640137,
"step": 1020
},
{
"epoch": 1.4295628036086052,
"grad_norm": 69.28439112059148,
"learning_rate": 1.1363700580260438e-07,
"logits/chosen": 3.0012755393981934,
"logits/rejected": 5.11987829208374,
"logps/chosen": -737.9568481445312,
"logps/rejected": -1678.4951171875,
"loss": 0.161,
"rewards/accuracies": 0.8968750238418579,
"rewards/chosen": -4.191954612731934,
"rewards/margins": 9.063277244567871,
"rewards/rejected": -13.255231857299805,
"step": 1030
},
{
"epoch": 1.4434420541290771,
"grad_norm": 18.312529268747,
"learning_rate": 1.0859828112836539e-07,
"logits/chosen": 2.285982847213745,
"logits/rejected": 4.8581390380859375,
"logps/chosen": -739.9261474609375,
"logps/rejected": -1586.6966552734375,
"loss": 0.1639,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.9794273376464844,
"rewards/margins": 8.331348419189453,
"rewards/rejected": -12.310776710510254,
"step": 1040
},
{
"epoch": 1.457321304649549,
"grad_norm": 25.436043150499486,
"learning_rate": 1.0364264155751487e-07,
"logits/chosen": 2.474147081375122,
"logits/rejected": 4.805792331695557,
"logps/chosen": -723.0252685546875,
"logps/rejected": -1613.719970703125,
"loss": 0.1599,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": -4.043066501617432,
"rewards/margins": 8.693578720092773,
"rewards/rejected": -12.736645698547363,
"step": 1050
},
{
"epoch": 1.4712005551700207,
"grad_norm": 18.578653218988183,
"learning_rate": 9.877299893461455e-08,
"logits/chosen": 2.1861484050750732,
"logits/rejected": 5.042906761169434,
"logps/chosen": -748.6861572265625,
"logps/rejected": -1565.2005615234375,
"loss": 0.1539,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": -4.071117401123047,
"rewards/margins": 8.12061595916748,
"rewards/rejected": -12.191734313964844,
"step": 1060
},
{
"epoch": 1.4850798056904928,
"grad_norm": 21.104448029863153,
"learning_rate": 9.39922145739683e-08,
"logits/chosen": 2.260671854019165,
"logits/rejected": 4.919951438903809,
"logps/chosen": -785.3671875,
"logps/rejected": -1578.3946533203125,
"loss": 0.1559,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -4.341281414031982,
"rewards/margins": 7.95664119720459,
"rewards/rejected": -12.29792308807373,
"step": 1070
},
{
"epoch": 1.4989590562109645,
"grad_norm": 30.412482027711867,
"learning_rate": 8.930309757836516e-08,
"logits/chosen": 2.812704563140869,
"logits/rejected": 5.714043140411377,
"logps/chosen": -803.9837036132812,
"logps/rejected": -1662.1617431640625,
"loss": 0.1624,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -4.532739162445068,
"rewards/margins": 8.574421882629395,
"rewards/rejected": -13.107160568237305,
"step": 1080
},
{
"epoch": 1.5128383067314366,
"grad_norm": 27.560035648494992,
"learning_rate": 8.470840318850168e-08,
"logits/chosen": 2.2375552654266357,
"logits/rejected": 5.399850368499756,
"logps/chosen": -772.5816650390625,
"logps/rejected": -1577.226806640625,
"loss": 0.1576,
"rewards/accuracies": 0.953125,
"rewards/chosen": -4.188956260681152,
"rewards/margins": 8.190386772155762,
"rewards/rejected": -12.379343032836914,
"step": 1090
},
{
"epoch": 1.5267175572519083,
"grad_norm": 21.688406462998472,
"learning_rate": 8.021083116405173e-08,
"logits/chosen": 2.3147189617156982,
"logits/rejected": 5.235360145568848,
"logps/chosen": -789.7681884765625,
"logps/rejected": -1536.5394287109375,
"loss": 0.1576,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": -4.277130126953125,
"rewards/margins": 7.6214165687561035,
"rewards/rejected": -11.898547172546387,
"step": 1100
},
{
"epoch": 1.5405968077723804,
"grad_norm": 17.348108637258065,
"learning_rate": 7.581302419733632e-08,
"logits/chosen": 2.7001595497131348,
"logits/rejected": 5.277278900146484,
"logps/chosen": -735.4856567382812,
"logps/rejected": -1611.0947265625,
"loss": 0.1519,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -4.2074875831604,
"rewards/margins": 8.55141830444336,
"rewards/rejected": -12.758907318115234,
"step": 1110
},
{
"epoch": 1.554476058292852,
"grad_norm": 26.00795087450143,
"learning_rate": 7.151756636052527e-08,
"logits/chosen": 2.411917209625244,
"logits/rejected": 5.364395618438721,
"logps/chosen": -790.9634399414062,
"logps/rejected": -1729.780517578125,
"loss": 0.1535,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -4.578307151794434,
"rewards/margins": 9.278319358825684,
"rewards/rejected": -13.856626510620117,
"step": 1120
},
{
"epoch": 1.5683553088133242,
"grad_norm": 19.597840266227173,
"learning_rate": 6.732698158728315e-08,
"logits/chosen": 2.3103692531585693,
"logits/rejected": 5.478982448577881,
"logps/chosen": -766.3836059570312,
"logps/rejected": -1625.908203125,
"loss": 0.1485,
"rewards/accuracies": 0.934374988079071,
"rewards/chosen": -4.306478977203369,
"rewards/margins": 8.519509315490723,
"rewards/rejected": -12.82598876953125,
"step": 1130
},
{
"epoch": 1.5822345593337959,
"grad_norm": 37.32265853585954,
"learning_rate": 6.324373218975104e-08,
"logits/chosen": 2.6391475200653076,
"logits/rejected": 5.588366508483887,
"logps/chosen": -738.453125,
"logps/rejected": -1503.0093994140625,
"loss": 0.1743,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -4.1589436531066895,
"rewards/margins": 7.595011234283447,
"rewards/rejected": -11.75395393371582,
"step": 1140
},
{
"epoch": 1.596113809854268,
"grad_norm": 34.02286786789855,
"learning_rate": 5.927021741173624e-08,
"logits/chosen": 2.2584009170532227,
"logits/rejected": 5.177404403686523,
"logps/chosen": -716.9951171875,
"logps/rejected": -1559.7662353515625,
"loss": 0.1604,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": -4.111263275146484,
"rewards/margins": 8.235260963439941,
"rewards/rejected": -12.346525192260742,
"step": 1150
},
{
"epoch": 1.6099930603747397,
"grad_norm": 20.932494952987984,
"learning_rate": 5.5408772018959996e-08,
"logits/chosen": 2.0977630615234375,
"logits/rejected": 5.0792131423950195,
"logps/chosen": -744.5228271484375,
"logps/rejected": -1543.290283203125,
"loss": 0.1549,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": -3.9662253856658936,
"rewards/margins": 7.838561058044434,
"rewards/rejected": -11.80478572845459,
"step": 1160
},
{
"epoch": 1.6238723108952118,
"grad_norm": 18.156628188332558,
"learning_rate": 5.166166492719124e-08,
"logits/chosen": 2.717636823654175,
"logits/rejected": 5.455983638763428,
"logps/chosen": -789.8887939453125,
"logps/rejected": -1659.1243896484375,
"loss": 0.1431,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -4.5077619552612305,
"rewards/margins": 8.738015174865723,
"rewards/rejected": -13.245776176452637,
"step": 1170
},
{
"epoch": 1.6377515614156835,
"grad_norm": 31.62818641679718,
"learning_rate": 4.8031097869072225e-08,
"logits/chosen": 2.479032516479492,
"logits/rejected": 5.483838081359863,
"logps/chosen": -843.8903198242188,
"logps/rejected": -1682.486328125,
"loss": 0.1808,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -4.988056182861328,
"rewards/margins": 8.428102493286133,
"rewards/rejected": -13.416158676147461,
"step": 1180
},
{
"epoch": 1.6516308119361556,
"grad_norm": 23.367654380160257,
"learning_rate": 4.451920410042048e-08,
"logits/chosen": 2.4087483882904053,
"logits/rejected": 5.190215110778809,
"logps/chosen": -769.288818359375,
"logps/rejected": -1569.325927734375,
"loss": 0.1421,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -4.326132297515869,
"rewards/margins": 7.9340925216674805,
"rewards/rejected": -12.260224342346191,
"step": 1190
},
{
"epoch": 1.6655100624566272,
"grad_norm": 19.705530443812812,
"learning_rate": 4.112804714676593e-08,
"logits/chosen": 2.707061529159546,
"logits/rejected": 5.535910606384277,
"logps/chosen": -801.08935546875,
"logps/rejected": -1702.814453125,
"loss": 0.1442,
"rewards/accuracies": 0.953125,
"rewards/chosen": -4.545718193054199,
"rewards/margins": 8.991861343383789,
"rewards/rejected": -13.537579536437988,
"step": 1200
},
{
"epoch": 1.6793893129770994,
"grad_norm": 18.481993519274262,
"learning_rate": 3.785961959086026e-08,
"logits/chosen": 2.372300624847412,
"logits/rejected": 4.822690010070801,
"logps/chosen": -803.9580688476562,
"logps/rejected": -1608.513671875,
"loss": 0.1649,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": -4.596296787261963,
"rewards/margins": 7.976349830627441,
"rewards/rejected": -12.572647094726562,
"step": 1210
},
{
"epoch": 1.693268563497571,
"grad_norm": 17.963706056533137,
"learning_rate": 3.4715841901871545e-08,
"logits/chosen": 2.230515956878662,
"logits/rejected": 4.730217456817627,
"logps/chosen": -780.9340209960938,
"logps/rejected": -1548.3511962890625,
"loss": 0.1478,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -4.49829626083374,
"rewards/margins": 7.652662754058838,
"rewards/rejected": -12.150960922241211,
"step": 1220
},
{
"epoch": 1.7071478140180432,
"grad_norm": 19.452721114097585,
"learning_rate": 3.169856130695106e-08,
"logits/chosen": 2.7935097217559814,
"logits/rejected": 5.348397254943848,
"logps/chosen": -837.3346557617188,
"logps/rejected": -1694.122314453125,
"loss": 0.1637,
"rewards/accuracies": 0.90625,
"rewards/chosen": -4.844791412353516,
"rewards/margins": 8.404356002807617,
"rewards/rejected": -13.249147415161133,
"step": 1230
},
{
"epoch": 1.7210270645385148,
"grad_norm": 24.207735428419646,
"learning_rate": 2.8809550705835546e-08,
"logits/chosen": 2.0396385192871094,
"logits/rejected": 5.347299098968506,
"logps/chosen": -809.9459838867188,
"logps/rejected": -1699.661865234375,
"loss": 0.1595,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": -4.592886447906494,
"rewards/margins": 8.931785583496094,
"rewards/rejected": -13.52467155456543,
"step": 1240
},
{
"epoch": 1.734906315058987,
"grad_norm": 24.611504297665018,
"learning_rate": 2.6050507629123724e-08,
"logits/chosen": 2.3445913791656494,
"logits/rejected": 5.292250633239746,
"logps/chosen": -763.6495361328125,
"logps/rejected": -1591.069091796875,
"loss": 0.1746,
"rewards/accuracies": 0.921875,
"rewards/chosen": -4.202335834503174,
"rewards/margins": 8.237665176391602,
"rewards/rejected": -12.440000534057617,
"step": 1250
},
{
"epoch": 1.7487855655794586,
"grad_norm": 17.351441576745916,
"learning_rate": 2.3423053240837514e-08,
"logits/chosen": 2.1456151008605957,
"logits/rejected": 5.197157859802246,
"logps/chosen": -775.988525390625,
"logps/rejected": -1635.9970703125,
"loss": 0.1602,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -4.428194999694824,
"rewards/margins": 8.511189460754395,
"rewards/rejected": -12.939386367797852,
"step": 1260
},
{
"epoch": 1.7626648160999308,
"grad_norm": 23.373280732240797,
"learning_rate": 2.0928731385855548e-08,
"logits/chosen": 2.3203883171081543,
"logits/rejected": 5.307148456573486,
"logps/chosen": -750.8080444335938,
"logps/rejected": -1596.9986572265625,
"loss": 0.1503,
"rewards/accuracies": 0.921875,
"rewards/chosen": -4.260045051574707,
"rewards/margins": 8.443410873413086,
"rewards/rejected": -12.703454971313477,
"step": 1270
},
{
"epoch": 1.7765440666204024,
"grad_norm": 21.91095019107269,
"learning_rate": 1.8569007682777415e-08,
"logits/chosen": 2.3937430381774902,
"logits/rejected": 5.018430233001709,
"logps/chosen": -775.3260498046875,
"logps/rejected": -1653.3775634765625,
"loss": 0.1517,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": -4.4354681968688965,
"rewards/margins": 8.597478866577148,
"rewards/rejected": -13.032946586608887,
"step": 1280
},
{
"epoch": 1.7904233171408745,
"grad_norm": 18.785456105009327,
"learning_rate": 1.6345268662752904e-08,
"logits/chosen": 2.1837244033813477,
"logits/rejected": 4.996502876281738,
"logps/chosen": -797.1134643554688,
"logps/rejected": -1595.029541015625,
"loss": 0.1469,
"rewards/accuracies": 0.934374988079071,
"rewards/chosen": -4.397826671600342,
"rewards/margins": 7.9338555335998535,
"rewards/rejected": -12.331681251525879,
"step": 1290
},
{
"epoch": 1.8043025676613462,
"grad_norm": 22.286605985901687,
"learning_rate": 1.4258820954781037e-08,
"logits/chosen": 2.1744418144226074,
"logits/rejected": 5.030113697052002,
"logps/chosen": -767.9185180664062,
"logps/rejected": -1573.216552734375,
"loss": 0.1686,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -4.429121971130371,
"rewards/margins": 7.945558071136475,
"rewards/rejected": -12.37468147277832,
"step": 1300
},
{
"epoch": 1.8181818181818183,
"grad_norm": 25.01416006471772,
"learning_rate": 1.2310890517958389e-08,
"logits/chosen": 2.1604392528533936,
"logits/rejected": 5.2982378005981445,
"logps/chosen": -799.98046875,
"logps/rejected": -1684.6910400390625,
"loss": 0.1451,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -4.48465633392334,
"rewards/margins": 8.867886543273926,
"rewards/rejected": -13.352543830871582,
"step": 1310
},
{
"epoch": 1.83206106870229,
"grad_norm": 22.26904532535015,
"learning_rate": 1.0502621921127774e-08,
"logits/chosen": 2.3922975063323975,
"logits/rejected": 5.1035685539245605,
"logps/chosen": -778.9490966796875,
"logps/rejected": -1598.516845703125,
"loss": 0.1596,
"rewards/accuracies": 0.909375011920929,
"rewards/chosen": -4.48614501953125,
"rewards/margins": 8.004111289978027,
"rewards/rejected": -12.490256309509277,
"step": 1320
},
{
"epoch": 1.845940319222762,
"grad_norm": 20.889996927509948,
"learning_rate": 8.83507767035016e-09,
"logits/chosen": 2.3073320388793945,
"logits/rejected": 4.977648735046387,
"logps/chosen": -797.042236328125,
"logps/rejected": -1626.1595458984375,
"loss": 0.1657,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -4.542098045349121,
"rewards/margins": 8.27629280090332,
"rewards/rejected": -12.818391799926758,
"step": 1330
},
{
"epoch": 1.8598195697432338,
"grad_norm": 26.353814591753384,
"learning_rate": 7.309237584595007e-09,
"logits/chosen": 2.40840220451355,
"logits/rejected": 5.275250434875488,
"logps/chosen": -784.8826904296875,
"logps/rejected": -1593.8211669921875,
"loss": 0.1604,
"rewards/accuracies": 0.934374988079071,
"rewards/chosen": -4.507115364074707,
"rewards/margins": 8.047722816467285,
"rewards/rejected": -12.554839134216309,
"step": 1340
},
{
"epoch": 1.8736988202637057,
"grad_norm": 27.69480128946826,
"learning_rate": 5.925998220016659e-09,
"logits/chosen": 2.0922293663024902,
"logits/rejected": 5.0452680587768555,
"logps/chosen": -795.8992919921875,
"logps/rejected": -1614.81298828125,
"loss": 0.1657,
"rewards/accuracies": 0.940625011920929,
"rewards/chosen": -4.478159427642822,
"rewards/margins": 8.272706985473633,
"rewards/rejected": -12.75086498260498,
"step": 1350
},
{
"epoch": 1.8875780707841776,
"grad_norm": 21.698890899471028,
"learning_rate": 4.6861723431538265e-09,
"logits/chosen": 2.527801275253296,
"logits/rejected": 5.509335517883301,
"logps/chosen": -762.1702270507812,
"logps/rejected": -1630.4300537109375,
"loss": 0.16,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": -4.3918232917785645,
"rewards/margins": 8.475740432739258,
"rewards/rejected": -12.867563247680664,
"step": 1360
},
{
"epoch": 1.9014573213046495,
"grad_norm": 23.606565024567274,
"learning_rate": 3.5904884533627113e-09,
"logits/chosen": 2.040306568145752,
"logits/rejected": 5.0976457595825195,
"logps/chosen": -809.3186645507812,
"logps/rejected": -1723.1839599609375,
"loss": 0.1661,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -4.630982398986816,
"rewards/margins": 9.00139331817627,
"rewards/rejected": -13.63237476348877,
"step": 1370
},
{
"epoch": 1.9153365718251214,
"grad_norm": 23.741237778017844,
"learning_rate": 2.639590354763882e-09,
"logits/chosen": 2.073582410812378,
"logits/rejected": 4.958869934082031,
"logps/chosen": -795.91162109375,
"logps/rejected": -1612.056396484375,
"loss": 0.1613,
"rewards/accuracies": 0.940625011920929,
"rewards/chosen": -4.596648693084717,
"rewards/margins": 8.110963821411133,
"rewards/rejected": -12.707611083984375,
"step": 1380
},
{
"epoch": 1.9292158223455933,
"grad_norm": 18.578608062996363,
"learning_rate": 1.8340367779545452e-09,
"logits/chosen": 2.297240972518921,
"logits/rejected": 5.08568811416626,
"logps/chosen": -794.4322509765625,
"logps/rejected": -1613.2269287109375,
"loss": 0.1486,
"rewards/accuracies": 0.9375,
"rewards/chosen": -4.632008075714111,
"rewards/margins": 8.13344955444336,
"rewards/rejected": -12.765457153320312,
"step": 1390
},
{
"epoch": 1.9430950728660652,
"grad_norm": 18.67065103551978,
"learning_rate": 1.1743010517085427e-09,
"logits/chosen": 2.2125773429870605,
"logits/rejected": 5.183515548706055,
"logps/chosen": -828.9417724609375,
"logps/rejected": -1649.573974609375,
"loss": 0.1548,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -4.853725433349609,
"rewards/margins": 8.179043769836426,
"rewards/rejected": -13.032768249511719,
"step": 1400
},
{
"epoch": 1.956974323386537,
"grad_norm": 22.95032078584552,
"learning_rate": 6.607708248569377e-10,
"logits/chosen": 2.1667537689208984,
"logits/rejected": 4.85316276550293,
"logps/chosen": -793.8030395507812,
"logps/rejected": -1551.1268310546875,
"loss": 0.1672,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -4.6431074142456055,
"rewards/margins": 7.463442325592041,
"rewards/rejected": -12.106550216674805,
"step": 1410
},
{
"epoch": 1.970853573907009,
"grad_norm": 18.974426521602602,
"learning_rate": 2.9374783851240923e-10,
"logits/chosen": 2.2130208015441895,
"logits/rejected": 5.2726945877075195,
"logps/chosen": -810.5875244140625,
"logps/rejected": -1586.91552734375,
"loss": 0.1663,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": -4.543966770172119,
"rewards/margins": 7.790687561035156,
"rewards/rejected": -12.334654808044434,
"step": 1420
},
{
"epoch": 1.984732824427481,
"grad_norm": 22.46417636362631,
"learning_rate": 7.34477487716878e-11,
"logits/chosen": 1.7181364297866821,
"logits/rejected": 4.7832465171813965,
"logps/chosen": -800.3704223632812,
"logps/rejected": -1517.8531494140625,
"loss": 0.1627,
"rewards/accuracies": 0.9375,
"rewards/chosen": -4.434643745422363,
"rewards/margins": 7.220560550689697,
"rewards/rejected": -11.655204772949219,
"step": 1430
},
{
"epoch": 1.9986120749479528,
"grad_norm": 31.109687143592964,
"learning_rate": 0.0,
"logits/chosen": 2.243917942047119,
"logits/rejected": 5.361481666564941,
"logps/chosen": -816.5538940429688,
"logps/rejected": -1620.42529296875,
"loss": 0.1599,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -4.733948707580566,
"rewards/margins": 8.088279724121094,
"rewards/rejected": -12.822227478027344,
"step": 1440
},
{
"epoch": 1.9986120749479528,
"step": 1440,
"total_flos": 0.0,
"train_loss": 0.25082930790053476,
"train_runtime": 42170.4729,
"train_samples_per_second": 8.747,
"train_steps_per_second": 0.034
}
],
"logging_steps": 10,
"max_steps": 1440,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}