zephyr-7b-MI-1-selm2 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998691442030882,
"eval_steps": 500,
"global_step": 477,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010468463752944255,
"grad_norm": 12.869433218842474,
"learning_rate": 5.208333333333333e-08,
"logits/chosen": -2.7303454875946045,
"logits/rejected": -2.6748926639556885,
"logps/chosen": -1.0383304357528687,
"logps/rejected": -1.093923807144165,
"loss": 2.0262,
"rewards/accuracies": 0.518750011920929,
"rewards/chosen": -1.0383304357528687,
"rewards/margins": 0.05559344217181206,
"rewards/rejected": -1.093923807144165,
"step": 5
},
{
"epoch": 0.02093692750588851,
"grad_norm": 12.963970831519292,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -2.8506417274475098,
"logits/rejected": -2.7654807567596436,
"logps/chosen": -1.025138020515442,
"logps/rejected": -1.1677943468093872,
"loss": 2.0292,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": -1.025138020515442,
"rewards/margins": 0.1426563411951065,
"rewards/rejected": -1.1677943468093872,
"step": 10
},
{
"epoch": 0.031405391258832765,
"grad_norm": 15.099715830730236,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": -2.77595853805542,
"logits/rejected": -2.7310924530029297,
"logps/chosen": -1.0620777606964111,
"logps/rejected": -1.1130949258804321,
"loss": 2.0243,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -1.0620777606964111,
"rewards/margins": 0.051017045974731445,
"rewards/rejected": -1.1130949258804321,
"step": 15
},
{
"epoch": 0.04187385501177702,
"grad_norm": 14.125874639614404,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -2.798926830291748,
"logits/rejected": -2.6963000297546387,
"logps/chosen": -1.026979684829712,
"logps/rejected": -1.1289321184158325,
"loss": 2.0224,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": -1.026979684829712,
"rewards/margins": 0.1019524484872818,
"rewards/rejected": -1.1289321184158325,
"step": 20
},
{
"epoch": 0.05234231876472128,
"grad_norm": 12.739023934080043,
"learning_rate": 2.604166666666667e-07,
"logits/chosen": -2.797764301300049,
"logits/rejected": -2.6895253658294678,
"logps/chosen": -0.9821299314498901,
"logps/rejected": -1.1730941534042358,
"loss": 1.9952,
"rewards/accuracies": 0.5687500238418579,
"rewards/chosen": -0.9821299314498901,
"rewards/margins": 0.19096432626247406,
"rewards/rejected": -1.1730941534042358,
"step": 25
},
{
"epoch": 0.06281078251766553,
"grad_norm": 18.287014857226495,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.7723119258880615,
"logits/rejected": -2.7337851524353027,
"logps/chosen": -1.0000275373458862,
"logps/rejected": -1.0346484184265137,
"loss": 1.9952,
"rewards/accuracies": 0.5625,
"rewards/chosen": -1.0000275373458862,
"rewards/margins": 0.034620799124240875,
"rewards/rejected": -1.0346484184265137,
"step": 30
},
{
"epoch": 0.07327924627060979,
"grad_norm": 17.8360996219737,
"learning_rate": 3.645833333333333e-07,
"logits/chosen": -2.7883362770080566,
"logits/rejected": -2.715696334838867,
"logps/chosen": -1.0128945112228394,
"logps/rejected": -1.225642442703247,
"loss": 1.9748,
"rewards/accuracies": 0.59375,
"rewards/chosen": -1.0128945112228394,
"rewards/margins": 0.2127479612827301,
"rewards/rejected": -1.225642442703247,
"step": 35
},
{
"epoch": 0.08374771002355404,
"grad_norm": 18.333373463486794,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.7703633308410645,
"logits/rejected": -2.6918962001800537,
"logps/chosen": -1.0058094263076782,
"logps/rejected": -1.131239891052246,
"loss": 1.9619,
"rewards/accuracies": 0.59375,
"rewards/chosen": -1.0058094263076782,
"rewards/margins": 0.12543055415153503,
"rewards/rejected": -1.131239891052246,
"step": 40
},
{
"epoch": 0.0942161737764983,
"grad_norm": 15.971248778309374,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": -2.732913017272949,
"logits/rejected": -2.7122368812561035,
"logps/chosen": -0.9598785638809204,
"logps/rejected": -1.1564022302627563,
"loss": 1.9542,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.9598785638809204,
"rewards/margins": 0.196523517370224,
"rewards/rejected": -1.1564022302627563,
"step": 45
},
{
"epoch": 0.10468463752944256,
"grad_norm": 23.366392169452176,
"learning_rate": 4.999731868769026e-07,
"logits/chosen": -2.672335624694824,
"logits/rejected": -2.6771888732910156,
"logps/chosen": -1.0208399295806885,
"logps/rejected": -1.2945979833602905,
"loss": 1.9581,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.0208399295806885,
"rewards/margins": 0.2737579941749573,
"rewards/rejected": -1.2945979833602905,
"step": 50
},
{
"epoch": 0.11515310128238682,
"grad_norm": 19.929116348471684,
"learning_rate": 4.996716052911017e-07,
"logits/chosen": -2.7723307609558105,
"logits/rejected": -2.747865676879883,
"logps/chosen": -1.0856590270996094,
"logps/rejected": -1.219327449798584,
"loss": 1.9648,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -1.0856590270996094,
"rewards/margins": 0.13366840779781342,
"rewards/rejected": -1.219327449798584,
"step": 55
},
{
"epoch": 0.12562156503533106,
"grad_norm": 23.73282923950602,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": -2.7040231227874756,
"logits/rejected": -2.6466102600097656,
"logps/chosen": -1.0688912868499756,
"logps/rejected": -1.260387659072876,
"loss": 1.9501,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": -1.0688912868499756,
"rewards/margins": 0.19149653613567352,
"rewards/rejected": -1.260387659072876,
"step": 60
},
{
"epoch": 0.1360900287882753,
"grad_norm": 18.942833820850367,
"learning_rate": 4.980652179769217e-07,
"logits/chosen": -2.716078519821167,
"logits/rejected": -2.7220449447631836,
"logps/chosen": -1.027010440826416,
"logps/rejected": -1.2260963916778564,
"loss": 1.9536,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.027010440826416,
"rewards/margins": 0.199085995554924,
"rewards/rejected": -1.2260963916778564,
"step": 65
},
{
"epoch": 0.14655849254121958,
"grad_norm": 22.28146945050057,
"learning_rate": 4.967625656594781e-07,
"logits/chosen": -2.7145752906799316,
"logits/rejected": -2.6811351776123047,
"logps/chosen": -1.074029564857483,
"logps/rejected": -1.3044393062591553,
"loss": 1.9325,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -1.074029564857483,
"rewards/margins": 0.2304096519947052,
"rewards/rejected": -1.3044393062591553,
"step": 70
},
{
"epoch": 0.15702695629416383,
"grad_norm": 30.956539188115524,
"learning_rate": 4.951291206355559e-07,
"logits/chosen": -2.640056610107422,
"logits/rejected": -2.5216145515441895,
"logps/chosen": -1.0524249076843262,
"logps/rejected": -1.358999490737915,
"loss": 1.9439,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -1.0524249076843262,
"rewards/margins": 0.3065745234489441,
"rewards/rejected": -1.358999490737915,
"step": 75
},
{
"epoch": 0.16749542004710807,
"grad_norm": 19.173789722918567,
"learning_rate": 4.93167072587771e-07,
"logits/chosen": -2.5955734252929688,
"logits/rejected": -2.5555953979492188,
"logps/chosen": -1.0017749071121216,
"logps/rejected": -1.4020613431930542,
"loss": 1.9082,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0017749071121216,
"rewards/margins": 0.4002866744995117,
"rewards/rejected": -1.4020613431930542,
"step": 80
},
{
"epoch": 0.17796388380005235,
"grad_norm": 26.77907962647508,
"learning_rate": 4.908790517010636e-07,
"logits/chosen": -2.730196475982666,
"logits/rejected": -2.6316986083984375,
"logps/chosen": -1.0233409404754639,
"logps/rejected": -1.4025558233261108,
"loss": 1.9293,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.0233409404754639,
"rewards/margins": 0.3792147934436798,
"rewards/rejected": -1.4025558233261108,
"step": 85
},
{
"epoch": 0.1884323475529966,
"grad_norm": 20.94451147733862,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": -2.7333931922912598,
"logits/rejected": -2.594924211502075,
"logps/chosen": -1.0183618068695068,
"logps/rejected": -1.4752748012542725,
"loss": 1.903,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.0183618068695068,
"rewards/margins": 0.4569129943847656,
"rewards/rejected": -1.4752748012542725,
"step": 90
},
{
"epoch": 0.19890081130594087,
"grad_norm": 27.23027264774861,
"learning_rate": 4.853377929214243e-07,
"logits/chosen": -2.6479899883270264,
"logits/rejected": -2.612231492996216,
"logps/chosen": -0.9796420335769653,
"logps/rejected": -1.2683651447296143,
"loss": 1.9133,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.9796420335769653,
"rewards/margins": 0.28872328996658325,
"rewards/rejected": -1.2683651447296143,
"step": 95
},
{
"epoch": 0.2093692750588851,
"grad_norm": 20.647765632695478,
"learning_rate": 4.820919832540181e-07,
"logits/chosen": -2.7929205894470215,
"logits/rejected": -2.730536460876465,
"logps/chosen": -1.0671679973602295,
"logps/rejected": -1.3122754096984863,
"loss": 1.9562,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -1.0671679973602295,
"rewards/margins": 0.24510732293128967,
"rewards/rejected": -1.3122754096984863,
"step": 100
},
{
"epoch": 0.21983773881182936,
"grad_norm": 19.882099174165575,
"learning_rate": 4.785350472409791e-07,
"logits/chosen": -2.7290585041046143,
"logits/rejected": -2.661957263946533,
"logps/chosen": -1.1218279600143433,
"logps/rejected": -1.4676848649978638,
"loss": 1.8476,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.1218279600143433,
"rewards/margins": 0.3458569347858429,
"rewards/rejected": -1.4676848649978638,
"step": 105
},
{
"epoch": 0.23030620256477363,
"grad_norm": 29.356563953547646,
"learning_rate": 4.7467175306295647e-07,
"logits/chosen": -2.677428722381592,
"logits/rejected": -2.587128162384033,
"logps/chosen": -1.0471330881118774,
"logps/rejected": -1.3834892511367798,
"loss": 1.8785,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -1.0471330881118774,
"rewards/margins": 0.33635613322257996,
"rewards/rejected": -1.3834892511367798,
"step": 110
},
{
"epoch": 0.24077466631771788,
"grad_norm": 22.70599557212694,
"learning_rate": 4.70507279583015e-07,
"logits/chosen": -2.699206590652466,
"logits/rejected": -2.6752054691314697,
"logps/chosen": -1.037027359008789,
"logps/rejected": -1.2504864931106567,
"loss": 1.8781,
"rewards/accuracies": 0.625,
"rewards/chosen": -1.037027359008789,
"rewards/margins": 0.21345916390419006,
"rewards/rejected": -1.2504864931106567,
"step": 115
},
{
"epoch": 0.2512431300706621,
"grad_norm": 22.718720973130083,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": -2.7028520107269287,
"logits/rejected": -2.581420660018921,
"logps/chosen": -0.958320140838623,
"logps/rejected": -1.4017829895019531,
"loss": 1.8894,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.958320140838623,
"rewards/margins": 0.44346290826797485,
"rewards/rejected": -1.4017829895019531,
"step": 120
},
{
"epoch": 0.26171159382360637,
"grad_norm": 27.839637111582167,
"learning_rate": 4.612975213859487e-07,
"logits/chosen": -2.5790719985961914,
"logits/rejected": -2.534557342529297,
"logps/chosen": -1.0435621738433838,
"logps/rejected": -1.4774357080459595,
"loss": 1.9045,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0435621738433838,
"rewards/margins": 0.43387335538864136,
"rewards/rejected": -1.4774357080459595,
"step": 125
},
{
"epoch": 0.2721800575765506,
"grad_norm": 24.64049193676725,
"learning_rate": 4.5626458262912735e-07,
"logits/chosen": -2.6262755393981934,
"logits/rejected": -2.5886640548706055,
"logps/chosen": -1.1033378839492798,
"logps/rejected": -1.5101052522659302,
"loss": 1.8774,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.1033378839492798,
"rewards/margins": 0.40676751732826233,
"rewards/rejected": -1.5101052522659302,
"step": 130
},
{
"epoch": 0.2826485213294949,
"grad_norm": 24.054739646210816,
"learning_rate": 4.5095513994085974e-07,
"logits/chosen": -2.5852103233337402,
"logits/rejected": -2.5114357471466064,
"logps/chosen": -1.036170244216919,
"logps/rejected": -1.5170302391052246,
"loss": 1.8964,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.036170244216919,
"rewards/margins": 0.4808600842952728,
"rewards/rejected": -1.5170302391052246,
"step": 135
},
{
"epoch": 0.29311698508243916,
"grad_norm": 32.17420726348548,
"learning_rate": 4.453763107901675e-07,
"logits/chosen": -2.5846521854400635,
"logits/rejected": -2.540843963623047,
"logps/chosen": -1.1467993259429932,
"logps/rejected": -1.3851490020751953,
"loss": 1.958,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -1.1467993259429932,
"rewards/margins": 0.23834970593452454,
"rewards/rejected": -1.3851490020751953,
"step": 140
},
{
"epoch": 0.3035854488353834,
"grad_norm": 31.293592588837402,
"learning_rate": 4.395355737667985e-07,
"logits/chosen": -2.5842862129211426,
"logits/rejected": -2.481231451034546,
"logps/chosen": -1.019995927810669,
"logps/rejected": -1.450305700302124,
"loss": 1.8621,
"rewards/accuracies": 0.625,
"rewards/chosen": -1.019995927810669,
"rewards/margins": 0.43030983209609985,
"rewards/rejected": -1.450305700302124,
"step": 145
},
{
"epoch": 0.31405391258832765,
"grad_norm": 31.395757438350795,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": -2.6442532539367676,
"logits/rejected": -2.5464065074920654,
"logps/chosen": -1.066561222076416,
"logps/rejected": -1.4769625663757324,
"loss": 1.8856,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": -1.066561222076416,
"rewards/margins": 0.4104014039039612,
"rewards/rejected": -1.4769625663757324,
"step": 150
},
{
"epoch": 0.3245223763412719,
"grad_norm": 54.99577539115471,
"learning_rate": 4.271000354423425e-07,
"logits/chosen": -2.584909200668335,
"logits/rejected": -2.5346925258636475,
"logps/chosen": -1.1342883110046387,
"logps/rejected": -1.709241509437561,
"loss": 1.8796,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.1342883110046387,
"rewards/margins": 0.5749532580375671,
"rewards/rejected": -1.709241509437561,
"step": 155
},
{
"epoch": 0.33499084009421615,
"grad_norm": 62.43588869532461,
"learning_rate": 4.2052190435769554e-07,
"logits/chosen": -2.5957446098327637,
"logits/rejected": -2.523685932159424,
"logps/chosen": -1.048396348953247,
"logps/rejected": -1.6725311279296875,
"loss": 1.8954,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.048396348953247,
"rewards/margins": 0.6241349577903748,
"rewards/rejected": -1.6725311279296875,
"step": 160
},
{
"epoch": 0.34545930384716045,
"grad_norm": 33.35503026194792,
"learning_rate": 4.137151834863213e-07,
"logits/chosen": -2.730977773666382,
"logits/rejected": -2.645996570587158,
"logps/chosen": -1.0159308910369873,
"logps/rejected": -1.359212040901184,
"loss": 1.8352,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.0159308910369873,
"rewards/margins": 0.343281090259552,
"rewards/rejected": -1.359212040901184,
"step": 165
},
{
"epoch": 0.3559277676001047,
"grad_norm": 32.04170211845891,
"learning_rate": 4.0668899744407567e-07,
"logits/chosen": -2.7265422344207764,
"logits/rejected": -2.597689151763916,
"logps/chosen": -0.9698517918586731,
"logps/rejected": -1.3197566270828247,
"loss": 1.8688,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.9698517918586731,
"rewards/margins": 0.349904865026474,
"rewards/rejected": -1.3197566270828247,
"step": 170
},
{
"epoch": 0.36639623135304894,
"grad_norm": 27.34421984947454,
"learning_rate": 3.994527650465352e-07,
"logits/chosen": -2.652785062789917,
"logits/rejected": -2.5916075706481934,
"logps/chosen": -0.9570480585098267,
"logps/rejected": -1.3661720752716064,
"loss": 1.8595,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.9570480585098267,
"rewards/margins": 0.409123957157135,
"rewards/rejected": -1.3661720752716064,
"step": 175
},
{
"epoch": 0.3768646951059932,
"grad_norm": 35.429346147889426,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": -2.6718568801879883,
"logits/rejected": -2.613348960876465,
"logps/chosen": -1.037642240524292,
"logps/rejected": -1.553224802017212,
"loss": 1.8512,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.037642240524292,
"rewards/margins": 0.5155824422836304,
"rewards/rejected": -1.553224802017212,
"step": 180
},
{
"epoch": 0.38733315885893743,
"grad_norm": 21.700575234978658,
"learning_rate": 3.8438923131177237e-07,
"logits/chosen": -2.6914803981781006,
"logits/rejected": -2.6440558433532715,
"logps/chosen": -0.9652795791625977,
"logps/rejected": -1.3787884712219238,
"loss": 1.8051,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.9652795791625977,
"rewards/margins": 0.41350871324539185,
"rewards/rejected": -1.3787884712219238,
"step": 185
},
{
"epoch": 0.39780162261188173,
"grad_norm": 32.670099112476585,
"learning_rate": 3.765821230985757e-07,
"logits/chosen": -2.6735928058624268,
"logits/rejected": -2.640225648880005,
"logps/chosen": -1.0144473314285278,
"logps/rejected": -1.4563645124435425,
"loss": 1.8273,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.0144473314285278,
"rewards/margins": 0.4419172406196594,
"rewards/rejected": -1.4563645124435425,
"step": 190
},
{
"epoch": 0.408270086364826,
"grad_norm": 25.238718639520766,
"learning_rate": 3.6860532770864005e-07,
"logits/chosen": -2.705747365951538,
"logits/rejected": -2.6104602813720703,
"logps/chosen": -1.0011558532714844,
"logps/rejected": -1.5988446474075317,
"loss": 1.8926,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.0011558532714844,
"rewards/margins": 0.5976886749267578,
"rewards/rejected": -1.5988446474075317,
"step": 195
},
{
"epoch": 0.4187385501177702,
"grad_norm": 23.97929885445044,
"learning_rate": 3.604695382782159e-07,
"logits/chosen": -2.644472122192383,
"logits/rejected": -2.5559470653533936,
"logps/chosen": -1.0497673749923706,
"logps/rejected": -1.3750804662704468,
"loss": 1.853,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -1.0497673749923706,
"rewards/margins": 0.32531291246414185,
"rewards/rejected": -1.3750804662704468,
"step": 200
},
{
"epoch": 0.42920701387071447,
"grad_norm": 24.184854862916072,
"learning_rate": 3.5218566107988867e-07,
"logits/chosen": -2.730355739593506,
"logits/rejected": -2.683979034423828,
"logps/chosen": -0.9922051429748535,
"logps/rejected": -1.5398856401443481,
"loss": 1.8724,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.9922051429748535,
"rewards/margins": 0.5476802587509155,
"rewards/rejected": -1.5398856401443481,
"step": 205
},
{
"epoch": 0.4396754776236587,
"grad_norm": 26.69782735208681,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": -2.5600266456604004,
"logits/rejected": -2.5641884803771973,
"logps/chosen": -1.0444707870483398,
"logps/rejected": -1.4003455638885498,
"loss": 1.8764,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.0444707870483398,
"rewards/margins": 0.35587459802627563,
"rewards/rejected": -1.4003455638885498,
"step": 210
},
{
"epoch": 0.45014394137660296,
"grad_norm": 39.9600540211152,
"learning_rate": 3.3521824616429284e-07,
"logits/chosen": -2.594709873199463,
"logits/rejected": -2.5683107376098633,
"logps/chosen": -1.126425862312317,
"logps/rejected": -1.614018440246582,
"loss": 1.8882,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.126425862312317,
"rewards/margins": 0.4875926077365875,
"rewards/rejected": -1.614018440246582,
"step": 215
},
{
"epoch": 0.46061240512954726,
"grad_norm": 35.61682268118621,
"learning_rate": 3.265574537815398e-07,
"logits/chosen": -2.5961153507232666,
"logits/rejected": -2.541783094406128,
"logps/chosen": -1.102477788925171,
"logps/rejected": -1.5613858699798584,
"loss": 1.8517,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.102477788925171,
"rewards/margins": 0.45890823006629944,
"rewards/rejected": -1.5613858699798584,
"step": 220
},
{
"epoch": 0.4710808688824915,
"grad_norm": 28.356766220400576,
"learning_rate": 3.1779403380910425e-07,
"logits/chosen": -2.6027655601501465,
"logits/rejected": -2.4865376949310303,
"logps/chosen": -1.0470962524414062,
"logps/rejected": -1.5330448150634766,
"loss": 1.8686,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.0470962524414062,
"rewards/margins": 0.4859485626220703,
"rewards/rejected": -1.5330448150634766,
"step": 225
},
{
"epoch": 0.48154933263543576,
"grad_norm": 46.96042544932364,
"learning_rate": 3.0893973387735683e-07,
"logits/chosen": -2.6151013374328613,
"logits/rejected": -2.59317684173584,
"logps/chosen": -1.0420851707458496,
"logps/rejected": -1.637976050376892,
"loss": 1.8322,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0420851707458496,
"rewards/margins": 0.5958911180496216,
"rewards/rejected": -1.637976050376892,
"step": 230
},
{
"epoch": 0.49201779638838,
"grad_norm": 26.100391626514853,
"learning_rate": 3.000064234440111e-07,
"logits/chosen": -2.5428245067596436,
"logits/rejected": -2.4851417541503906,
"logps/chosen": -1.0481144189834595,
"logps/rejected": -1.4148355722427368,
"loss": 1.8385,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.0481144189834595,
"rewards/margins": 0.3667212724685669,
"rewards/rejected": -1.4148355722427368,
"step": 235
},
{
"epoch": 0.5024862601413242,
"grad_norm": 24.14989026973158,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": -2.537334680557251,
"logits/rejected": -2.472512722015381,
"logps/chosen": -1.0776469707489014,
"logps/rejected": -1.6333630084991455,
"loss": 1.8508,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0776469707489014,
"rewards/margins": 0.5557159185409546,
"rewards/rejected": -1.6333630084991455,
"step": 240
},
{
"epoch": 0.5129547238942685,
"grad_norm": 32.38845341862983,
"learning_rate": 2.8195076242990116e-07,
"logits/chosen": -2.6101131439208984,
"logits/rejected": -2.5712387561798096,
"logps/chosen": -0.9908210635185242,
"logps/rejected": -1.6324228048324585,
"loss": 1.8075,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.9908210635185242,
"rewards/margins": 0.6416017413139343,
"rewards/rejected": -1.6324228048324585,
"step": 245
},
{
"epoch": 0.5234231876472127,
"grad_norm": 24.243213200791057,
"learning_rate": 2.7285261601056697e-07,
"logits/chosen": -2.559380292892456,
"logits/rejected": -2.4537248611450195,
"logps/chosen": -1.0512077808380127,
"logps/rejected": -1.6259485483169556,
"loss": 1.8615,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0512077808380127,
"rewards/margins": 0.5747405290603638,
"rewards/rejected": -1.6259485483169556,
"step": 250
},
{
"epoch": 0.533891651400157,
"grad_norm": 24.228357677990427,
"learning_rate": 2.6372383496608186e-07,
"logits/chosen": -2.654383420944214,
"logits/rejected": -2.566788911819458,
"logps/chosen": -1.0131515264511108,
"logps/rejected": -1.4951848983764648,
"loss": 1.8526,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -1.0131515264511108,
"rewards/margins": 0.4820333421230316,
"rewards/rejected": -1.4951848983764648,
"step": 255
},
{
"epoch": 0.5443601151531012,
"grad_norm": 26.47263727780241,
"learning_rate": 2.5457665670441937e-07,
"logits/chosen": -2.597144603729248,
"logits/rejected": -2.572925090789795,
"logps/chosen": -0.9740694761276245,
"logps/rejected": -1.3979696035385132,
"loss": 1.8422,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.9740694761276245,
"rewards/margins": 0.4239000678062439,
"rewards/rejected": -1.3979696035385132,
"step": 260
},
{
"epoch": 0.5548285789060455,
"grad_norm": 22.019445712488015,
"learning_rate": 2.454233432955807e-07,
"logits/chosen": -2.612497329711914,
"logits/rejected": -2.5739381313323975,
"logps/chosen": -1.0217043161392212,
"logps/rejected": -1.4334819316864014,
"loss": 1.8467,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0217043161392212,
"rewards/margins": 0.4117775857448578,
"rewards/rejected": -1.4334819316864014,
"step": 265
},
{
"epoch": 0.5652970426589898,
"grad_norm": 31.0373082982515,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": -2.673020839691162,
"logits/rejected": -2.6832852363586426,
"logps/chosen": -1.1109861135482788,
"logps/rejected": -1.4625297784805298,
"loss": 1.8524,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.1109861135482788,
"rewards/margins": 0.3515436351299286,
"rewards/rejected": -1.4625297784805298,
"step": 270
},
{
"epoch": 0.575765506411934,
"grad_norm": 29.601053617457204,
"learning_rate": 2.2714738398943308e-07,
"logits/chosen": -2.6691536903381348,
"logits/rejected": -2.587501049041748,
"logps/chosen": -1.0023844242095947,
"logps/rejected": -1.6990854740142822,
"loss": 1.7818,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0023844242095947,
"rewards/margins": 0.6967008113861084,
"rewards/rejected": -1.6990854740142822,
"step": 275
},
{
"epoch": 0.5862339701648783,
"grad_norm": 23.304378897117243,
"learning_rate": 2.1804923757009882e-07,
"logits/chosen": -2.7478485107421875,
"logits/rejected": -2.6739115715026855,
"logps/chosen": -0.967931866645813,
"logps/rejected": -1.6212472915649414,
"loss": 1.8289,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.967931866645813,
"rewards/margins": 0.6533153653144836,
"rewards/rejected": -1.6212472915649414,
"step": 280
},
{
"epoch": 0.5967024339178225,
"grad_norm": 30.765932632186015,
"learning_rate": 2.089939221172446e-07,
"logits/chosen": -2.683306932449341,
"logits/rejected": -2.599849224090576,
"logps/chosen": -1.079810380935669,
"logps/rejected": -1.5674939155578613,
"loss": 1.8595,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.079810380935669,
"rewards/margins": 0.4876834750175476,
"rewards/rejected": -1.5674939155578613,
"step": 285
},
{
"epoch": 0.6071708976707668,
"grad_norm": 29.814903147144946,
"learning_rate": 1.9999357655598891e-07,
"logits/chosen": -2.6533594131469727,
"logits/rejected": -2.5845093727111816,
"logps/chosen": -1.0454188585281372,
"logps/rejected": -1.3878004550933838,
"loss": 1.824,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.0454188585281372,
"rewards/margins": 0.3423817753791809,
"rewards/rejected": -1.3878004550933838,
"step": 290
},
{
"epoch": 0.6176393614237111,
"grad_norm": 27.45666647767728,
"learning_rate": 1.9106026612264315e-07,
"logits/chosen": -2.6601853370666504,
"logits/rejected": -2.6058852672576904,
"logps/chosen": -1.038104772567749,
"logps/rejected": -1.3897347450256348,
"loss": 1.8319,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.038104772567749,
"rewards/margins": 0.3516300320625305,
"rewards/rejected": -1.3897347450256348,
"step": 295
},
{
"epoch": 0.6281078251766553,
"grad_norm": 30.25718786399205,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": -2.5553667545318604,
"logits/rejected": -2.451190233230591,
"logps/chosen": -1.108663558959961,
"logps/rejected": -1.5200892686843872,
"loss": 1.8653,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.108663558959961,
"rewards/margins": 0.4114256501197815,
"rewards/rejected": -1.5200892686843872,
"step": 300
},
{
"epoch": 0.6385762889295996,
"grad_norm": 30.020985917091327,
"learning_rate": 1.7344254621846017e-07,
"logits/chosen": -2.640211343765259,
"logits/rejected": -2.6043474674224854,
"logps/chosen": -1.0029397010803223,
"logps/rejected": -1.265366792678833,
"loss": 1.8019,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": -1.0029397010803223,
"rewards/margins": 0.2624271512031555,
"rewards/rejected": -1.265366792678833,
"step": 305
},
{
"epoch": 0.6490447526825438,
"grad_norm": 35.00385114807507,
"learning_rate": 1.647817538357072e-07,
"logits/chosen": -2.629307270050049,
"logits/rejected": -2.4974796772003174,
"logps/chosen": -1.0108293294906616,
"logps/rejected": -1.5987032651901245,
"loss": 1.825,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.0108293294906616,
"rewards/margins": 0.5878738164901733,
"rewards/rejected": -1.5987032651901245,
"step": 310
},
{
"epoch": 0.6595132164354881,
"grad_norm": 33.95334568069759,
"learning_rate": 1.562351990976095e-07,
"logits/chosen": -2.6276791095733643,
"logits/rejected": -2.503952980041504,
"logps/chosen": -1.0764460563659668,
"logps/rejected": -1.5906479358673096,
"loss": 1.8359,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.0764460563659668,
"rewards/margins": 0.514201819896698,
"rewards/rejected": -1.5906479358673096,
"step": 315
},
{
"epoch": 0.6699816801884323,
"grad_norm": 29.685480077784828,
"learning_rate": 1.478143389201113e-07,
"logits/chosen": -2.592771291732788,
"logits/rejected": -2.5038256645202637,
"logps/chosen": -1.034784197807312,
"logps/rejected": -1.6445497274398804,
"loss": 1.7974,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.034784197807312,
"rewards/margins": 0.6097655892372131,
"rewards/rejected": -1.6445497274398804,
"step": 320
},
{
"epoch": 0.6804501439413766,
"grad_norm": 45.93159466769048,
"learning_rate": 1.3953046172178413e-07,
"logits/chosen": -2.5624806880950928,
"logits/rejected": -2.4602789878845215,
"logps/chosen": -1.1025031805038452,
"logps/rejected": -1.7038898468017578,
"loss": 1.8729,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.1025031805038452,
"rewards/margins": 0.6013867855072021,
"rewards/rejected": -1.7038898468017578,
"step": 325
},
{
"epoch": 0.6909186076943209,
"grad_norm": 31.678205608236812,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": -2.599463939666748,
"logits/rejected": -2.5258090496063232,
"logps/chosen": -0.9680983424186707,
"logps/rejected": -1.6296237707138062,
"loss": 1.8012,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.9680983424186707,
"rewards/margins": 0.6615251302719116,
"rewards/rejected": -1.6296237707138062,
"step": 330
},
{
"epoch": 0.7013870714472651,
"grad_norm": 37.038121368656675,
"learning_rate": 1.2341787690142435e-07,
"logits/chosen": -2.5534796714782715,
"logits/rejected": -2.517007827758789,
"logps/chosen": -1.1220486164093018,
"logps/rejected": -1.6694402694702148,
"loss": 1.8584,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.1220486164093018,
"rewards/margins": 0.5473915338516235,
"rewards/rejected": -1.6694402694702148,
"step": 335
},
{
"epoch": 0.7118555352002094,
"grad_norm": 32.99732563960218,
"learning_rate": 1.1561076868822755e-07,
"logits/chosen": -2.58585524559021,
"logits/rejected": -2.4757778644561768,
"logps/chosen": -1.0188167095184326,
"logps/rejected": -1.71088445186615,
"loss": 1.8167,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0188167095184326,
"rewards/margins": 0.6920677423477173,
"rewards/rejected": -1.71088445186615,
"step": 340
},
{
"epoch": 0.7223239989531536,
"grad_norm": 31.150959005223893,
"learning_rate": 1.0798381331721107e-07,
"logits/chosen": -2.595191478729248,
"logits/rejected": -2.491788387298584,
"logps/chosen": -1.0228018760681152,
"logps/rejected": -1.5643987655639648,
"loss": 1.8471,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.0228018760681152,
"rewards/margins": 0.5415968298912048,
"rewards/rejected": -1.5643987655639648,
"step": 345
},
{
"epoch": 0.7327924627060979,
"grad_norm": 32.974367073048874,
"learning_rate": 1.0054723495346482e-07,
"logits/chosen": -2.4937729835510254,
"logits/rejected": -2.3299617767333984,
"logps/chosen": -1.100921630859375,
"logps/rejected": -1.6467349529266357,
"loss": 1.8361,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.100921630859375,
"rewards/margins": 0.5458132028579712,
"rewards/rejected": -1.6467349529266357,
"step": 350
},
{
"epoch": 0.7432609264590422,
"grad_norm": 28.926067639567258,
"learning_rate": 9.331100255592436e-08,
"logits/chosen": -2.4389522075653076,
"logits/rejected": -2.374527931213379,
"logps/chosen": -1.0656179189682007,
"logps/rejected": -1.67294442653656,
"loss": 1.8606,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0656179189682007,
"rewards/margins": 0.6073265075683594,
"rewards/rejected": -1.67294442653656,
"step": 355
},
{
"epoch": 0.7537293902119864,
"grad_norm": 41.902021950847704,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": -2.497872829437256,
"logits/rejected": -2.4256420135498047,
"logps/chosen": -1.0714236497879028,
"logps/rejected": -1.6986573934555054,
"loss": 1.8331,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.0714236497879028,
"rewards/margins": 0.6272337436676025,
"rewards/rejected": -1.6986573934555054,
"step": 360
},
{
"epoch": 0.7641978539649307,
"grad_norm": 31.435500480488756,
"learning_rate": 7.947809564230445e-08,
"logits/chosen": -2.532473087310791,
"logits/rejected": -2.366628646850586,
"logps/chosen": -1.0359852313995361,
"logps/rejected": -1.7668260335922241,
"loss": 1.7899,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0359852313995361,
"rewards/margins": 0.730840802192688,
"rewards/rejected": -1.7668260335922241,
"step": 365
},
{
"epoch": 0.7746663177178749,
"grad_norm": 39.54146493100524,
"learning_rate": 7.289996455765748e-08,
"logits/chosen": -2.484086513519287,
"logits/rejected": -2.349870204925537,
"logps/chosen": -1.0333083868026733,
"logps/rejected": -1.6121408939361572,
"loss": 1.7669,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.0333083868026733,
"rewards/margins": 0.5788324475288391,
"rewards/rejected": -1.6121408939361572,
"step": 370
},
{
"epoch": 0.7851347814708192,
"grad_norm": 44.26258075929706,
"learning_rate": 6.655924144404906e-08,
"logits/chosen": -2.413093328475952,
"logits/rejected": -2.3522191047668457,
"logps/chosen": -0.994648277759552,
"logps/rejected": -1.8310960531234741,
"loss": 1.8098,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.994648277759552,
"rewards/margins": 0.8364478945732117,
"rewards/rejected": -1.8310960531234741,
"step": 375
},
{
"epoch": 0.7956032452237635,
"grad_norm": 42.81392142197046,
"learning_rate": 6.046442623320145e-08,
"logits/chosen": -2.5220797061920166,
"logits/rejected": -2.5181660652160645,
"logps/chosen": -1.0257781744003296,
"logps/rejected": -1.565146803855896,
"loss": 1.848,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0257781744003296,
"rewards/margins": 0.5393685102462769,
"rewards/rejected": -1.565146803855896,
"step": 380
},
{
"epoch": 0.8060717089767077,
"grad_norm": 29.963279293888323,
"learning_rate": 5.4623689209832484e-08,
"logits/chosen": -2.4084603786468506,
"logits/rejected": -2.3918216228485107,
"logps/chosen": -1.0294010639190674,
"logps/rejected": -1.5665457248687744,
"loss": 1.8117,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.0294010639190674,
"rewards/margins": 0.537144660949707,
"rewards/rejected": -1.5665457248687744,
"step": 385
},
{
"epoch": 0.816540172729652,
"grad_norm": 33.04796978561497,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": -2.4773898124694824,
"logits/rejected": -2.334437608718872,
"logps/chosen": -1.0476093292236328,
"logps/rejected": -1.5581729412078857,
"loss": 1.8541,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.0476093292236328,
"rewards/margins": 0.5105637311935425,
"rewards/rejected": -1.5581729412078857,
"step": 390
},
{
"epoch": 0.8270086364825961,
"grad_norm": 36.23962004411558,
"learning_rate": 4.373541737087263e-08,
"logits/chosen": -2.4543843269348145,
"logits/rejected": -2.3622539043426514,
"logps/chosen": -1.0004545450210571,
"logps/rejected": -1.7118451595306396,
"loss": 1.822,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0004545450210571,
"rewards/margins": 0.7113906145095825,
"rewards/rejected": -1.7118451595306396,
"step": 395
},
{
"epoch": 0.8374771002355405,
"grad_norm": 35.25243486313715,
"learning_rate": 3.8702478614051345e-08,
"logits/chosen": -2.4065768718719482,
"logits/rejected": -2.2744433879852295,
"logps/chosen": -1.054172396659851,
"logps/rejected": -1.5296390056610107,
"loss": 1.8213,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.054172396659851,
"rewards/margins": 0.4754667282104492,
"rewards/rejected": -1.5296390056610107,
"step": 400
},
{
"epoch": 0.8479455639884846,
"grad_norm": 37.565064056547584,
"learning_rate": 3.3952790595787986e-08,
"logits/chosen": -2.449446439743042,
"logits/rejected": -2.3016552925109863,
"logps/chosen": -1.0504591464996338,
"logps/rejected": -1.7570114135742188,
"loss": 1.8237,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0504591464996338,
"rewards/margins": 0.7065523266792297,
"rewards/rejected": -1.7570114135742188,
"step": 405
},
{
"epoch": 0.8584140277414289,
"grad_norm": 32.90354747950104,
"learning_rate": 2.9492720416985e-08,
"logits/chosen": -2.3721275329589844,
"logits/rejected": -2.268225908279419,
"logps/chosen": -1.1106208562850952,
"logps/rejected": -1.9150855541229248,
"loss": 1.8043,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.1106208562850952,
"rewards/margins": 0.8044648170471191,
"rewards/rejected": -1.9150855541229248,
"step": 410
},
{
"epoch": 0.8688824914943732,
"grad_norm": 56.846827175134244,
"learning_rate": 2.5328246937043525e-08,
"logits/chosen": -2.4376819133758545,
"logits/rejected": -2.3333914279937744,
"logps/chosen": -1.1099225282669067,
"logps/rejected": -1.6782783269882202,
"loss": 1.8252,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.1099225282669067,
"rewards/margins": 0.568355917930603,
"rewards/rejected": -1.6782783269882202,
"step": 415
},
{
"epoch": 0.8793509552473174,
"grad_norm": 43.77757640842041,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": -2.4453635215759277,
"logits/rejected": -2.2761971950531006,
"logps/chosen": -1.050205111503601,
"logps/rejected": -1.8707281351089478,
"loss": 1.7931,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.050205111503601,
"rewards/margins": 0.8205229640007019,
"rewards/rejected": -1.8707281351089478,
"step": 420
},
{
"epoch": 0.8898194190002617,
"grad_norm": 37.41494353520166,
"learning_rate": 1.7908016745981856e-08,
"logits/chosen": -2.448782444000244,
"logits/rejected": -2.2901499271392822,
"logps/chosen": -1.043718695640564,
"logps/rejected": -1.739383339881897,
"loss": 1.8267,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.043718695640564,
"rewards/margins": 0.695664644241333,
"rewards/rejected": -1.739383339881897,
"step": 425
},
{
"epoch": 0.9002878827532059,
"grad_norm": 30.693244043593708,
"learning_rate": 1.4662207078575684e-08,
"logits/chosen": -2.4449446201324463,
"logits/rejected": -2.294093608856201,
"logps/chosen": -1.0893933773040771,
"logps/rejected": -1.642295241355896,
"loss": 1.7605,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0893933773040771,
"rewards/margins": 0.5529020428657532,
"rewards/rejected": -1.642295241355896,
"step": 430
},
{
"epoch": 0.9107563465061502,
"grad_norm": 33.12531830225382,
"learning_rate": 1.1731874863145142e-08,
"logits/chosen": -2.3656153678894043,
"logits/rejected": -2.352541208267212,
"logps/chosen": -0.9648414850234985,
"logps/rejected": -1.5097295045852661,
"loss": 1.7984,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.9648414850234985,
"rewards/margins": 0.5448881983757019,
"rewards/rejected": -1.5097295045852661,
"step": 435
},
{
"epoch": 0.9212248102590945,
"grad_norm": 40.86500194765816,
"learning_rate": 9.12094829893642e-09,
"logits/chosen": -2.48162579536438,
"logits/rejected": -2.3563125133514404,
"logps/chosen": -1.0542184114456177,
"logps/rejected": -1.5571963787078857,
"loss": 1.8252,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.0542184114456177,
"rewards/margins": 0.5029779672622681,
"rewards/rejected": -1.5571963787078857,
"step": 440
},
{
"epoch": 0.9316932740120387,
"grad_norm": 30.792748106701044,
"learning_rate": 6.832927412229017e-09,
"logits/chosen": -2.399744987487793,
"logits/rejected": -2.232243061065674,
"logps/chosen": -1.0769870281219482,
"logps/rejected": -1.728468656539917,
"loss": 1.7915,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0769870281219482,
"rewards/margins": 0.6514816284179688,
"rewards/rejected": -1.728468656539917,
"step": 445
},
{
"epoch": 0.942161737764983,
"grad_norm": 29.999766652539517,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": -2.459473133087158,
"logits/rejected": -2.2956230640411377,
"logps/chosen": -1.0055310726165771,
"logps/rejected": -1.5959385633468628,
"loss": 1.8126,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0055310726165771,
"rewards/margins": 0.5904075503349304,
"rewards/rejected": -1.5959385633468628,
"step": 450
},
{
"epoch": 0.9526302015179272,
"grad_norm": 36.992525127734154,
"learning_rate": 3.2374343405217884e-09,
"logits/chosen": -2.42592191696167,
"logits/rejected": -2.3259921073913574,
"logps/chosen": -1.0747435092926025,
"logps/rejected": -1.6361202001571655,
"loss": 1.8922,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0747435092926025,
"rewards/margins": 0.5613764524459839,
"rewards/rejected": -1.6361202001571655,
"step": 455
},
{
"epoch": 0.9630986652708715,
"grad_norm": 43.04775872660601,
"learning_rate": 1.9347820230782295e-09,
"logits/chosen": -2.488020896911621,
"logits/rejected": -2.239854097366333,
"logps/chosen": -0.9606490135192871,
"logps/rejected": -1.82891845703125,
"loss": 1.8115,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.9606490135192871,
"rewards/margins": 0.8682695627212524,
"rewards/rejected": -1.82891845703125,
"step": 460
},
{
"epoch": 0.9735671290238157,
"grad_norm": 25.076712480507634,
"learning_rate": 9.64668657069706e-10,
"logits/chosen": -2.4506993293762207,
"logits/rejected": -2.252009868621826,
"logps/chosen": -1.0200697183609009,
"logps/rejected": -1.8124902248382568,
"loss": 1.8012,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.0200697183609009,
"rewards/margins": 0.7924206852912903,
"rewards/rejected": -1.8124902248382568,
"step": 465
},
{
"epoch": 0.98403559277676,
"grad_norm": 26.74704043050654,
"learning_rate": 3.2839470889836627e-10,
"logits/chosen": -2.3772401809692383,
"logits/rejected": -2.2120604515075684,
"logps/chosen": -0.9293681979179382,
"logps/rejected": -1.7934061288833618,
"loss": 1.7671,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.9293681979179382,
"rewards/margins": 0.8640381097793579,
"rewards/rejected": -1.7934061288833618,
"step": 470
},
{
"epoch": 0.9945040565297043,
"grad_norm": 50.411989213964986,
"learning_rate": 2.6813123097352287e-11,
"logits/chosen": -2.4256701469421387,
"logits/rejected": -2.3270206451416016,
"logps/chosen": -0.9691774249076843,
"logps/rejected": -1.615407943725586,
"loss": 1.797,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.9691774249076843,
"rewards/margins": 0.6462305784225464,
"rewards/rejected": -1.615407943725586,
"step": 475
},
{
"epoch": 0.998691442030882,
"step": 477,
"total_flos": 0.0,
"train_loss": 1.8663299063716545,
"train_runtime": 8277.1533,
"train_samples_per_second": 7.386,
"train_steps_per_second": 0.058
}
],
"logging_steps": 5,
"max_steps": 477,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}