|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -1.995415210723877,
      "logits/rejected": -2.0361223220825195,
      "logps/chosen": -475.42401123046875,
      "logps/pi_response": -236.53262329101562,
      "logps/ref_response": -236.53262329101562,
      "logps/rejected": -571.512451171875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.009598970413208,
      "logits/rejected": -1.9492552280426025,
      "logps/chosen": -398.6226501464844,
      "logps/pi_response": -160.84869384765625,
      "logps/ref_response": -161.05593872070312,
      "logps/rejected": -436.8831481933594,
      "loss": 0.6907,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": -0.00939179491251707,
      "rewards/margins": 0.001390137942507863,
      "rewards/rejected": -0.010781930759549141,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -1.9771480560302734,
      "logits/rejected": -1.934857726097107,
      "logps/chosen": -397.58258056640625,
      "logps/pi_response": -166.76702880859375,
      "logps/ref_response": -170.16830444335938,
      "logps/rejected": -543.4985961914062,
      "loss": 0.647,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.2936175465583801,
      "rewards/margins": 0.1937868893146515,
      "rewards/rejected": -0.4874044954776764,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -1.9177439212799072,
      "logits/rejected": -1.8892943859100342,
      "logps/chosen": -484.595947265625,
      "logps/pi_response": -180.4770965576172,
      "logps/ref_response": -176.73623657226562,
      "logps/rejected": -564.6936645507812,
      "loss": 0.6744,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.7892134785652161,
      "rewards/margins": 0.32036879658699036,
      "rewards/rejected": -1.1095821857452393,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -1.9668203592300415,
      "logits/rejected": -1.9143187999725342,
      "logps/chosen": -483.791015625,
      "logps/pi_response": -200.40647888183594,
      "logps/ref_response": -175.1200408935547,
      "logps/rejected": -604.1815185546875,
      "loss": 0.6401,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.7993307113647461,
      "rewards/margins": 0.2609265446662903,
      "rewards/rejected": -1.0602573156356812,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -1.839737892150879,
      "logits/rejected": -1.6476318836212158,
      "logps/chosen": -528.5265502929688,
      "logps/pi_response": -202.95977783203125,
      "logps/ref_response": -184.63198852539062,
      "logps/rejected": -655.3939208984375,
      "loss": 0.5966,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.9922237396240234,
      "rewards/margins": 0.4673174321651459,
      "rewards/rejected": -1.4595410823822021,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -1.1266146898269653,
      "logits/rejected": -0.9666005969047546,
      "logps/chosen": -499.87744140625,
      "logps/pi_response": -201.53793334960938,
      "logps/ref_response": -175.98574829101562,
      "logps/rejected": -626.661376953125,
      "loss": 0.5888,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.8994181752204895,
      "rewards/margins": 0.5771986842155457,
      "rewards/rejected": -1.4766168594360352,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -0.5178133845329285,
      "logits/rejected": -0.19050300121307373,
      "logps/chosen": -519.18115234375,
      "logps/pi_response": -228.1885528564453,
      "logps/ref_response": -161.18075561523438,
      "logps/rejected": -662.8558349609375,
      "loss": 0.586,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.2691316604614258,
      "rewards/margins": 0.6006889939308167,
      "rewards/rejected": -1.8698203563690186,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -0.6555771827697754,
      "logits/rejected": -0.3421247899532318,
      "logps/chosen": -536.9698486328125,
      "logps/pi_response": -224.0596923828125,
      "logps/ref_response": -175.85252380371094,
      "logps/rejected": -673.0121459960938,
      "loss": 0.5367,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.2228807210922241,
      "rewards/margins": 0.626615583896637,
      "rewards/rejected": -1.8494962453842163,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -0.19603128731250763,
      "logits/rejected": 0.2836986184120178,
      "logps/chosen": -586.1405029296875,
      "logps/pi_response": -253.3416290283203,
      "logps/ref_response": -184.4833526611328,
      "logps/rejected": -720.2657470703125,
      "loss": 0.5611,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.6138103008270264,
      "rewards/margins": 0.6346837878227234,
      "rewards/rejected": -2.2484939098358154,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -0.19370171427726746,
      "logits/rejected": 0.2344537228345871,
      "logps/chosen": -579.291015625,
      "logps/pi_response": -266.8283386230469,
      "logps/ref_response": -191.95008850097656,
      "logps/rejected": -695.4265747070312,
      "loss": 0.5654,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.5290648937225342,
      "rewards/margins": 0.5712359547615051,
      "rewards/rejected": -2.1003005504608154,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -0.3242037892341614,
      "logits/rejected": 0.11515514552593231,
      "logps/chosen": -574.2125854492188,
      "logps/pi_response": -237.9784698486328,
      "logps/ref_response": -172.76661682128906,
      "logps/rejected": -651.3258056640625,
      "loss": 0.5715,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.267407774925232,
      "rewards/margins": 0.5044547915458679,
      "rewards/rejected": -1.7718626260757446,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -0.09869599342346191,
      "logits/rejected": 0.1822367161512375,
      "logps/chosen": -539.43701171875,
      "logps/pi_response": -243.4367218017578,
      "logps/ref_response": -166.47335815429688,
      "logps/rejected": -692.1175537109375,
      "loss": 0.5311,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.3617780208587646,
      "rewards/margins": 0.6481711268424988,
      "rewards/rejected": -2.009949207305908,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -0.07027504593133926,
      "logits/rejected": 0.25386515259742737,
      "logps/chosen": -550.7713623046875,
      "logps/pi_response": -254.2024383544922,
      "logps/ref_response": -178.47569274902344,
      "logps/rejected": -677.2970581054688,
      "loss": 0.5504,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.3074489831924438,
      "rewards/margins": 0.5863579511642456,
      "rewards/rejected": -1.8938068151474,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": 0.026742279529571533,
      "logits/rejected": 0.45248135924339294,
      "logps/chosen": -533.3641357421875,
      "logps/pi_response": -244.9961700439453,
      "logps/ref_response": -176.0931396484375,
      "logps/rejected": -698.1570434570312,
      "loss": 0.5502,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.3009979724884033,
      "rewards/margins": 0.6849142909049988,
      "rewards/rejected": -1.9859119653701782,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -0.41795143485069275,
      "logits/rejected": 0.16397379338741302,
      "logps/chosen": -548.3402099609375,
      "logps/pi_response": -245.32962036132812,
      "logps/ref_response": -180.5318145751953,
      "logps/rejected": -688.0223999023438,
      "loss": 0.5526,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.2109647989273071,
      "rewards/margins": 0.7583649754524231,
      "rewards/rejected": -1.969329833984375,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.5857575074681696,
      "train_runtime": 4237.6593,
      "train_samples_per_second": 4.809,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}