{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.989547038327526,
  "eval_steps": 50,
  "global_step": 429,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06968641114982578,
      "grad_norm": 8.802282592203445,
      "learning_rate": 1.1627906976744186e-07,
      "logits/chosen": -2.5104970932006836,
      "logits/rejected": -2.4596619606018066,
      "logps/chosen": -224.5651397705078,
      "logps/rejected": -205.2760772705078,
      "loss": 0.693,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.00018909585196524858,
      "rewards/margins": 0.0003898117574863136,
      "rewards/rejected": -0.00020071589096914977,
      "step": 10
    },
    {
      "epoch": 0.13937282229965156,
      "grad_norm": 8.00329664970527,
      "learning_rate": 2.3255813953488372e-07,
      "logits/chosen": -2.5501396656036377,
      "logits/rejected": -2.4160428047180176,
      "logps/chosen": -232.53744506835938,
      "logps/rejected": -201.850341796875,
      "loss": 0.6905,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.00012921685993205756,
      "rewards/margins": 0.003956976812332869,
      "rewards/rejected": -0.003827760461717844,
      "step": 20
    },
    {
      "epoch": 0.20905923344947736,
      "grad_norm": 9.21698829498988,
      "learning_rate": 3.4883720930232557e-07,
      "logits/chosen": -2.546245813369751,
      "logits/rejected": -2.463120937347412,
      "logps/chosen": -226.8832244873047,
      "logps/rejected": -239.523681640625,
      "loss": 0.6743,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.014942864887416363,
      "rewards/margins": 0.03859414532780647,
      "rewards/rejected": -0.023651275783777237,
      "step": 30
    },
    {
      "epoch": 0.2787456445993031,
      "grad_norm": 12.35483210006122,
      "learning_rate": 4.6511627906976743e-07,
      "logits/chosen": -2.4866766929626465,
      "logits/rejected": -2.4126744270324707,
      "logps/chosen": -231.99227905273438,
      "logps/rejected": -216.04421997070312,
      "loss": 0.6353,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.054218046367168427,
      "rewards/margins": 0.13844327628612518,
      "rewards/rejected": -0.08422522246837616,
      "step": 40
    },
    {
      "epoch": 0.34843205574912894,
      "grad_norm": 13.315168659489975,
      "learning_rate": 4.995943852340362e-07,
      "logits/chosen": -2.4557487964630127,
      "logits/rejected": -2.3202855587005615,
      "logps/chosen": -229.59341430664062,
      "logps/rejected": -249.4304656982422,
      "loss": 0.5611,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.050500113517045975,
      "rewards/margins": 0.34012553095817566,
      "rewards/rejected": -0.39062565565109253,
      "step": 50
    },
    {
      "epoch": 0.34843205574912894,
      "eval_logits/chosen": -2.5442895889282227,
      "eval_logits/rejected": -2.5030057430267334,
      "eval_logps/chosen": -271.88238525390625,
      "eval_logps/rejected": -298.31842041015625,
      "eval_loss": 0.6184219717979431,
      "eval_rewards/accuracies": 0.6875,
      "eval_rewards/chosen": -0.09252451360225677,
      "eval_rewards/margins": 0.26403892040252686,
      "eval_rewards/rejected": -0.35656341910362244,
      "eval_runtime": 102.8631,
      "eval_samples_per_second": 19.443,
      "eval_steps_per_second": 0.311,
      "step": 50
    },
    {
      "epoch": 0.4181184668989547,
      "grad_norm": 24.301334749463223,
      "learning_rate": 4.976108685115826e-07,
      "logits/chosen": -2.1179282665252686,
      "logits/rejected": -1.9562362432479858,
      "logps/chosen": -267.93402099609375,
      "logps/rejected": -289.59161376953125,
      "loss": 0.4883,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.27572691440582275,
      "rewards/margins": 0.5648951530456543,
      "rewards/rejected": -0.840622067451477,
      "step": 60
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 20.753816184751873,
      "learning_rate": 4.939880644182383e-07,
      "logits/chosen": -1.034070372581482,
      "logits/rejected": -0.7417710423469543,
      "logps/chosen": -269.5135803222656,
      "logps/rejected": -375.80865478515625,
      "loss": 0.3948,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.5687657594680786,
      "rewards/margins": 1.0274503231048584,
      "rewards/rejected": -1.5962162017822266,
      "step": 70
    },
    {
      "epoch": 0.5574912891986062,
      "grad_norm": 24.52021049371824,
      "learning_rate": 4.887499574302625e-07,
      "logits/chosen": -0.6896623373031616,
      "logits/rejected": -0.026825392618775368,
      "logps/chosen": -287.94683837890625,
      "logps/rejected": -470.5874938964844,
      "loss": 0.3522,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -0.6946723461151123,
      "rewards/margins": 1.3733885288238525,
      "rewards/rejected": -2.068060874938965,
      "step": 80
    },
    {
      "epoch": 0.627177700348432,
      "grad_norm": 24.824006994081103,
      "learning_rate": 4.819312260037522e-07,
      "logits/chosen": -0.034453898668289185,
      "logits/rejected": 1.0798954963684082,
      "logps/chosen": -335.093994140625,
      "logps/rejected": -479.3101501464844,
      "loss": 0.3311,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.8856996297836304,
      "rewards/margins": 1.5640310049057007,
      "rewards/rejected": -2.44973087310791,
      "step": 90
    },
    {
      "epoch": 0.6968641114982579,
      "grad_norm": 29.51099669244077,
      "learning_rate": 4.7357701298877766e-07,
      "logits/chosen": 0.3334025740623474,
      "logits/rejected": 1.538297176361084,
      "logps/chosen": -331.7084045410156,
      "logps/rejected": -487.90850830078125,
      "loss": 0.3114,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -0.964444637298584,
      "rewards/margins": 1.6006944179534912,
      "rewards/rejected": -2.5651392936706543,
      "step": 100
    },
    {
      "epoch": 0.6968641114982579,
      "eval_logits/chosen": 0.8929426670074463,
      "eval_logits/rejected": 1.4735616445541382,
      "eval_logps/chosen": -390.1243591308594,
      "eval_logps/rejected": -486.2124938964844,
      "eval_loss": 0.5683817267417908,
      "eval_rewards/accuracies": 0.71875,
      "eval_rewards/chosen": -1.2749444246292114,
      "eval_rewards/margins": 0.9605594873428345,
      "eval_rewards/rejected": -2.235503911972046,
      "eval_runtime": 103.1315,
      "eval_samples_per_second": 19.393,
      "eval_steps_per_second": 0.31,
      "step": 100
    },
    {
      "epoch": 0.7665505226480837,
      "grad_norm": 41.99375505795103,
      "learning_rate": 4.637426267648599e-07,
      "logits/chosen": 0.6614994406700134,
      "logits/rejected": 2.438904285430908,
      "logps/chosen": -366.45001220703125,
      "logps/rejected": -493.54083251953125,
      "loss": 0.3086,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.182141661643982,
      "rewards/margins": 1.715505599975586,
      "rewards/rejected": -2.8976473808288574,
      "step": 110
    },
    {
      "epoch": 0.8362369337979094,
      "grad_norm": 25.617638996097494,
      "learning_rate": 4.5249317507639726e-07,
      "logits/chosen": 0.8210003972053528,
      "logits/rejected": 3.049501657485962,
      "logps/chosen": -359.2828674316406,
      "logps/rejected": -492.6153259277344,
      "loss": 0.2811,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -1.1480309963226318,
      "rewards/margins": 1.9257586002349854,
      "rewards/rejected": -3.073789596557617,
      "step": 120
    },
    {
      "epoch": 0.9059233449477352,
      "grad_norm": 35.44641711552981,
      "learning_rate": 4.399031339922038e-07,
      "logits/chosen": 0.930186927318573,
      "logits/rejected": 2.8596444129943848,
      "logps/chosen": -327.95440673828125,
      "logps/rejected": -511.48974609375,
      "loss": 0.2937,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -1.1231250762939453,
      "rewards/margins": 2.0866007804870605,
      "rewards/rejected": -3.2097256183624268,
      "step": 130
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 35.926943082564264,
      "learning_rate": 4.2605585484282636e-07,
      "logits/chosen": 1.312204122543335,
      "logits/rejected": 3.0301642417907715,
      "logps/chosen": -330.2955627441406,
      "logps/rejected": -519.2758178710938,
      "loss": 0.287,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.2009658813476562,
      "rewards/margins": 2.0115137100219727,
      "rewards/rejected": -3.21247935295105,
      "step": 140
    },
    {
      "epoch": 1.0452961672473868,
      "grad_norm": 32.033284996332334,
      "learning_rate": 4.110430123999227e-07,
      "logits/chosen": 0.659065842628479,
      "logits/rejected": 3.104271650314331,
      "logps/chosen": -317.4303894042969,
      "logps/rejected": -531.0374755859375,
      "loss": 0.2115,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.8270686268806458,
      "rewards/margins": 2.510606527328491,
      "rewards/rejected": -3.337675094604492,
      "step": 150
    },
    {
      "epoch": 1.0452961672473868,
      "eval_logits/chosen": 0.8515522480010986,
      "eval_logits/rejected": 1.7463769912719727,
      "eval_logps/chosen": -381.556884765625,
      "eval_logps/rejected": -500.3030090332031,
      "eval_loss": 0.5424056649208069,
      "eval_rewards/accuracies": 0.734375,
      "eval_rewards/chosen": -1.1892690658569336,
      "eval_rewards/margins": 1.1871401071548462,
      "eval_rewards/rejected": -2.3764092922210693,
      "eval_runtime": 105.3616,
      "eval_samples_per_second": 18.982,
      "eval_steps_per_second": 0.304,
      "step": 150
    },
    {
      "epoch": 1.1149825783972125,
      "grad_norm": 24.792386221394285,
      "learning_rate": 3.9496399795098266e-07,
      "logits/chosen": 1.6556669473648071,
      "logits/rejected": 3.6661171913146973,
      "logps/chosen": -347.97235107421875,
      "logps/rejected": -595.5989379882812,
      "loss": 0.1821,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.170838713645935,
      "rewards/margins": 2.654542922973633,
      "rewards/rejected": -3.8253815174102783,
      "step": 160
    },
    {
      "epoch": 1.1846689895470384,
      "grad_norm": 23.32201234382713,
      "learning_rate": 3.779252612874913e-07,
      "logits/chosen": 1.7220815420150757,
      "logits/rejected": 3.848780393600464,
      "logps/chosen": -399.74237060546875,
      "logps/rejected": -668.2623901367188,
      "loss": 0.16,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.4958232641220093,
      "rewards/margins": 2.804189920425415,
      "rewards/rejected": -4.300013065338135,
      "step": 170
    },
    {
      "epoch": 1.254355400696864,
      "grad_norm": 24.033562398904397,
      "learning_rate": 3.60039605962848e-07,
      "logits/chosen": 1.6456016302108765,
      "logits/rejected": 4.141748905181885,
      "logps/chosen": -367.87652587890625,
      "logps/rejected": -632.2777099609375,
      "loss": 0.1681,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.4175409078598022,
      "rewards/margins": 2.8999009132385254,
      "rewards/rejected": -4.317441463470459,
      "step": 180
    },
    {
      "epoch": 1.32404181184669,
      "grad_norm": 26.83633778813544,
      "learning_rate": 3.414254424857272e-07,
      "logits/chosen": 1.1464613676071167,
      "logits/rejected": 3.143468141555786,
      "logps/chosen": -339.797119140625,
      "logps/rejected": -644.8397216796875,
      "loss": 0.1546,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.5132825374603271,
      "rewards/margins": 3.0432307720184326,
      "rewards/rejected": -4.55651330947876,
      "step": 190
    },
    {
      "epoch": 1.3937282229965158,
      "grad_norm": 19.155865265559413,
      "learning_rate": 3.2220600439305403e-07,
      "logits/chosen": 1.3574491739273071,
      "logits/rejected": 3.787911891937256,
      "logps/chosen": -416.85736083984375,
      "logps/rejected": -657.4432373046875,
      "loss": 0.1459,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.535851240158081,
      "rewards/margins": 2.970709800720215,
      "rewards/rejected": -4.506561279296875,
      "step": 200
    },
    {
      "epoch": 1.3937282229965158,
      "eval_logits/chosen": 1.2032525539398193,
      "eval_logits/rejected": 2.118121862411499,
      "eval_logps/chosen": -421.3101501464844,
      "eval_logps/rejected": -557.5459594726562,
      "eval_loss": 0.5505546927452087,
      "eval_rewards/accuracies": 0.73828125,
      "eval_rewards/chosen": -1.5868018865585327,
      "eval_rewards/margins": 1.3620365858078003,
      "eval_rewards/rejected": -2.948838472366333,
      "eval_runtime": 103.6526,
      "eval_samples_per_second": 19.295,
      "eval_steps_per_second": 0.309,
      "step": 200
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 22.71782849317497,
      "learning_rate": 3.025085323925175e-07,
      "logits/chosen": 1.5900554656982422,
      "logits/rejected": 3.4598968029022217,
      "logps/chosen": -366.8812561035156,
      "logps/rejected": -705.404541015625,
      "loss": 0.1762,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.5007431507110596,
      "rewards/margins": 3.2060344219207764,
      "rewards/rejected": -4.706777095794678,
      "step": 210
    },
    {
      "epoch": 1.533101045296167,
      "grad_norm": 31.902926071044504,
      "learning_rate": 2.8246343197594046e-07,
      "logits/chosen": 1.4988172054290771,
      "logits/rejected": 3.816242218017578,
      "logps/chosen": -390.7834167480469,
      "logps/rejected": -683.3237915039062,
      "loss": 0.1638,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -1.6800391674041748,
      "rewards/margins": 3.071725845336914,
      "rewards/rejected": -4.751765251159668,
      "step": 220
    },
    {
      "epoch": 1.6027874564459932,
      "grad_norm": 24.581190457673245,
      "learning_rate": 2.622034100804566e-07,
      "logits/chosen": 1.2105190753936768,
      "logits/rejected": 3.060250759124756,
      "logps/chosen": -380.35443115234375,
      "logps/rejected": -664.1146240234375,
      "loss": 0.1477,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.7808634042739868,
      "rewards/margins": 2.9058308601379395,
      "rewards/rejected": -4.686694145202637,
      "step": 230
    },
    {
      "epoch": 1.6724738675958188,
      "grad_norm": 34.80569495595782,
      "learning_rate": 2.418625965131574e-07,
      "logits/chosen": 0.8179672360420227,
      "logits/rejected": 3.110491991043091,
      "logps/chosen": -394.3634338378906,
      "logps/rejected": -657.7012939453125,
      "loss": 0.1637,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -1.7445188760757446,
      "rewards/margins": 2.720533847808838,
      "rewards/rejected": -4.465052604675293,
      "step": 240
    },
    {
      "epoch": 1.7421602787456445,
      "grad_norm": 25.450844453542942,
      "learning_rate": 2.2157565595574668e-07,
      "logits/chosen": 0.9487978219985962,
      "logits/rejected": 3.9239680767059326,
      "logps/chosen": -408.14556884765625,
      "logps/rejected": -711.4246215820312,
      "loss": 0.155,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.8012412786483765,
      "rewards/margins": 3.1372928619384766,
      "rewards/rejected": -4.938534736633301,
      "step": 250
    },
    {
      "epoch": 1.7421602787456445,
      "eval_logits/chosen": -0.12574833631515503,
      "eval_logits/rejected": 0.6638859510421753,
      "eval_logps/chosen": -436.41619873046875,
      "eval_logps/rejected": -576.3017578125,
      "eval_loss": 0.5420774221420288,
      "eval_rewards/accuracies": 0.7421875,
      "eval_rewards/chosen": -1.7378628253936768,
      "eval_rewards/margins": 1.3985340595245361,
      "eval_rewards/rejected": -3.136396646499634,
      "eval_runtime": 103.0892,
      "eval_samples_per_second": 19.401,
      "eval_steps_per_second": 0.31,
      "step": 250
    },
    {
      "epoch": 1.8118466898954704,
      "grad_norm": 23.92913273720315,
      "learning_rate": 2.0147689642810138e-07,
      "logits/chosen": 0.7426701784133911,
      "logits/rejected": 2.593843460083008,
      "logps/chosen": -370.8197021484375,
      "logps/rejected": -702.0731811523438,
      "loss": 0.1381,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.6753326654434204,
      "rewards/margins": 3.3540942668914795,
      "rewards/rejected": -5.029427528381348,
      "step": 260
    },
    {
      "epoch": 1.8815331010452963,
      "grad_norm": 24.753449787195066,
      "learning_rate": 1.8169938011308233e-07,
      "logits/chosen": 1.4780113697052002,
      "logits/rejected": 3.954249620437622,
      "logps/chosen": -449.23419189453125,
      "logps/rejected": -756.6870727539062,
      "loss": 0.1492,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -2.0043387413024902,
      "rewards/margins": 3.3266875743865967,
      "rewards/rejected": -5.331027030944824,
      "step": 270
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 13.501142665400005,
      "learning_rate": 1.6237404242930697e-07,
      "logits/chosen": 1.2981535196304321,
      "logits/rejected": 4.038895606994629,
      "logps/chosen": -451.763427734375,
      "logps/rejected": -725.1036376953125,
      "loss": 0.1381,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.9369767904281616,
      "rewards/margins": 3.1422879695892334,
      "rewards/rejected": -5.0792646408081055,
      "step": 280
    },
    {
      "epoch": 2.0209059233449476,
      "grad_norm": 9.538816556642104,
      "learning_rate": 1.4362882518398945e-07,
      "logits/chosen": 1.494961142539978,
      "logits/rejected": 4.037637233734131,
      "logps/chosen": -420.6214904785156,
      "logps/rejected": -780.333984375,
      "loss": 0.1227,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.9955527782440186,
      "rewards/margins": 3.5567429065704346,
      "rewards/rejected": -5.552295684814453,
      "step": 290
    },
    {
      "epoch": 2.0905923344947737,
      "grad_norm": 12.60922676008742,
      "learning_rate": 1.2558782954473823e-07,
      "logits/chosen": 1.1326804161071777,
      "logits/rejected": 4.095265865325928,
      "logps/chosen": -436.822998046875,
      "logps/rejected": -848.0110473632812,
      "loss": 0.0778,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.9827162027359009,
      "rewards/margins": 3.976830244064331,
      "rewards/rejected": -5.9595465660095215,
      "step": 300
    },
    {
      "epoch": 2.0905923344947737,
      "eval_logits/chosen": 1.3197466135025024,
      "eval_logits/rejected": 2.447815418243408,
      "eval_logps/chosen": -487.21826171875,
      "eval_logps/rejected": -653.505615234375,
      "eval_loss": 0.5661060810089111,
      "eval_rewards/accuracies": 0.7578125,
      "eval_rewards/chosen": -2.2458834648132324,
      "eval_rewards/margins": 1.6625516414642334,
      "eval_rewards/rejected": -3.908435106277466,
      "eval_runtime": 104.4454,
      "eval_samples_per_second": 19.149,
      "eval_steps_per_second": 0.306,
      "step": 300
    },
    {
      "epoch": 2.1602787456445993,
      "grad_norm": 11.223175960925781,
      "learning_rate": 1.0837049443799279e-07,
      "logits/chosen": 1.4726234674453735,
      "logits/rejected": 4.226162433624268,
      "logps/chosen": -435.3155212402344,
      "logps/rejected": -828.8818359375,
      "loss": 0.0668,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.937731146812439,
      "rewards/margins": 4.151057243347168,
      "rewards/rejected": -6.088788032531738,
      "step": 310
    },
    {
      "epoch": 2.229965156794425,
      "grad_norm": 10.719580615366253,
      "learning_rate": 9.209080581344306e-08,
      "logits/chosen": 2.386169195175171,
      "logits/rejected": 4.565543174743652,
      "logps/chosen": -418.3710021972656,
      "logps/rejected": -801.6163330078125,
      "loss": 0.0806,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.2484259605407715,
      "rewards/margins": 3.874068021774292,
      "rewards/rejected": -6.122493743896484,
      "step": 320
    },
    {
      "epoch": 2.2996515679442506,
      "grad_norm": 11.257400452209483,
      "learning_rate": 7.685654200943378e-08,
      "logits/chosen": 2.802990674972534,
      "logits/rejected": 4.914515495300293,
      "logps/chosen": -466.86846923828125,
      "logps/rejected": -849.2081298828125,
      "loss": 0.0786,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -2.4424731731414795,
      "rewards/margins": 3.8902206420898438,
      "rewards/rejected": -6.332693576812744,
      "step": 330
    },
    {
      "epoch": 2.3693379790940767,
      "grad_norm": 17.67311746157273,
      "learning_rate": 6.27685602153478e-08,
      "logits/chosen": 2.4263174533843994,
      "logits/rejected": 5.334603786468506,
      "logps/chosen": -501.0049743652344,
      "logps/rejected": -861.1316528320312,
      "loss": 0.0648,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.3902511596679688,
      "rewards/margins": 4.114856719970703,
      "rewards/rejected": -6.505107879638672,
      "step": 340
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 10.445164955483738,
      "learning_rate": 4.992012875488669e-08,
      "logits/chosen": 2.200308084487915,
      "logits/rejected": 5.237423896789551,
      "logps/chosen": -467.2925720214844,
      "logps/rejected": -873.8508911132812,
      "loss": 0.063,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -2.2877776622772217,
      "rewards/margins": 4.244064807891846,
      "rewards/rejected": -6.5318427085876465,
      "step": 350
    },
    {
      "epoch": 2.4390243902439024,
      "eval_logits/chosen": 2.029879570007324,
      "eval_logits/rejected": 3.200937032699585,
      "eval_logps/chosen": -507.741943359375,
      "eval_logps/rejected": -685.679443359375,
      "eval_loss": 0.5745352506637573,
      "eval_rewards/accuracies": 0.74609375,
      "eval_rewards/chosen": -2.451119899749756,
      "eval_rewards/margins": 1.779052972793579,
      "eval_rewards/rejected": -4.230173110961914,
      "eval_runtime": 105.4127,
      "eval_samples_per_second": 18.973,
      "eval_steps_per_second": 0.304,
      "step": 350
    },
    {
      "epoch": 2.508710801393728,
      "grad_norm": 10.66433078302641,
      "learning_rate": 3.8396309610812086e-08,
      "logits/chosen": 2.8574512004852295,
      "logits/rejected": 5.9728217124938965,
      "logps/chosen": -452.10546875,
      "logps/rejected": -884.5281372070312,
      "loss": 0.0578,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.3754355907440186,
      "rewards/margins": 4.4589080810546875,
      "rewards/rejected": -6.834343910217285,
      "step": 360
    },
    {
      "epoch": 2.578397212543554,
      "grad_norm": 14.129834453371615,
      "learning_rate": 2.8273395279091005e-08,
      "logits/chosen": 2.6822140216827393,
      "logits/rejected": 6.491819858551025,
      "logps/chosen": -505.1715393066406,
      "logps/rejected": -903.2467651367188,
      "loss": 0.0563,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.402754068374634,
      "rewards/margins": 4.534693241119385,
      "rewards/rejected": -6.937447547912598,
      "step": 370
    },
    {
      "epoch": 2.64808362369338,
      "grad_norm": 17.667869862784286,
      "learning_rate": 1.9618403680707053e-08,
      "logits/chosen": 3.0484352111816406,
      "logits/rejected": 5.842028617858887,
      "logps/chosen": -509.3741149902344,
      "logps/rejected": -942.7772216796875,
      "loss": 0.0629,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.5882575511932373,
      "rewards/margins": 4.363358974456787,
      "rewards/rejected": -6.9516167640686035,
      "step": 380
    },
    {
      "epoch": 2.7177700348432055,
      "grad_norm": 23.19194674974416,
      "learning_rate": 1.2488634475031761e-08,
      "logits/chosen": 2.507598876953125,
      "logits/rejected": 5.978418350219727,
      "logps/chosen": -524.1458740234375,
      "logps/rejected": -918.3518676757812,
      "loss": 0.0567,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -2.6885342597961426,
      "rewards/margins": 4.4366984367370605,
      "rewards/rejected": -7.1252336502075195,
      "step": 390
    },
    {
      "epoch": 2.7874564459930316,
      "grad_norm": 9.128226349975483,
      "learning_rate": 6.9312897121466815e-09,
      "logits/chosen": 2.713538885116577,
      "logits/rejected": 6.0777788162231445,
      "logps/chosen": -524.9368896484375,
      "logps/rejected": -934.0505981445312,
      "loss": 0.0546,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.7826755046844482,
      "rewards/margins": 4.360199928283691,
      "rewards/rejected": -7.142875671386719,
      "step": 400
    },
    {
      "epoch": 2.7874564459930316,
      "eval_logits/chosen": 2.500847339630127,
      "eval_logits/rejected": 3.7820303440093994,
      "eval_logps/chosen": -558.7693481445312,
      "eval_logps/rejected": -752.859130859375,
      "eval_loss": 0.588392436504364,
      "eval_rewards/accuracies": 0.7578125,
      "eval_rewards/chosen": -2.9613940715789795,
      "eval_rewards/margins": 1.9405763149261475,
      "eval_rewards/rejected": -4.901970386505127,
      "eval_runtime": 103.5135,
      "eval_samples_per_second": 19.321,
      "eval_steps_per_second": 0.309,
      "step": 400
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 11.962588165819302,
      "learning_rate": 2.983161335556761e-09,
      "logits/chosen": 3.7102668285369873,
      "logits/rejected": 6.578777313232422,
      "logps/chosen": -509.08746337890625,
      "logps/rejected": -960.9105224609375,
      "loss": 0.0538,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.9860901832580566,
      "rewards/margins": 4.577176094055176,
      "rewards/rejected": -7.563265323638916,
      "step": 410
    },
    {
      "epoch": 2.926829268292683,
      "grad_norm": 10.645124338408442,
      "learning_rate": 6.703876041571077e-10,
      "logits/chosen": 2.7117552757263184,
      "logits/rejected": 5.930572986602783,
      "logps/chosen": -520.0614624023438,
      "logps/rejected": -930.9078979492188,
      "loss": 0.0637,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -2.889979839324951,
      "rewards/margins": 4.437302112579346,
      "rewards/rejected": -7.327281951904297,
      "step": 420
    },
    {
      "epoch": 2.989547038327526,
      "step": 429,
      "total_flos": 0.0,
      "train_loss": 0.22287911155840734,
      "train_runtime": 11425.5472,
      "train_samples_per_second": 4.815,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 429,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}