{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.843719482421875,
      "logits/rejected": -2.8328428268432617,
      "logps/chosen": -340.24505615234375,
      "logps/pi_response": -88.81813049316406,
      "logps/ref_response": -88.81813049316406,
      "logps/rejected": -126.11064147949219,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.7834701538085938,
      "logits/rejected": -2.751505136489868,
      "logps/chosen": -225.68850708007812,
      "logps/pi_response": -73.31761932373047,
      "logps/ref_response": -73.05384826660156,
      "logps/rejected": -120.19402313232422,
      "loss": 0.692,
      "rewards/accuracies": 0.5208333134651184,
      "rewards/chosen": 0.0008960026898421347,
      "rewards/margins": 0.0021771909669041634,
      "rewards/rejected": -0.0012811883352696896,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.758051633834839,
      "logits/rejected": -2.748227834701538,
      "logps/chosen": -216.07437133789062,
      "logps/pi_response": -79.01066589355469,
      "logps/ref_response": -72.98006439208984,
      "logps/rejected": -114.68902587890625,
      "loss": 0.6649,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.006878279149532318,
      "rewards/margins": 0.04253412410616875,
      "rewards/rejected": -0.03565584123134613,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.6395750045776367,
      "logits/rejected": -2.6150403022766113,
      "logps/chosen": -240.57943725585938,
      "logps/pi_response": -100.7327651977539,
      "logps/ref_response": -68.2356948852539,
      "logps/rejected": -123.48177337646484,
      "loss": 0.6193,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.09199332445859909,
      "rewards/margins": 0.1744811087846756,
      "rewards/rejected": -0.2664744257926941,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.5330681800842285,
      "logits/rejected": -2.5084614753723145,
      "logps/chosen": -274.4284973144531,
      "logps/pi_response": -148.79498291015625,
      "logps/ref_response": -75.56888580322266,
      "logps/rejected": -188.65740966796875,
      "loss": 0.5787,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.25631314516067505,
      "rewards/margins": 0.3753192722797394,
      "rewards/rejected": -0.6316324472427368,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.545583724975586,
      "logits/rejected": -2.520908832550049,
      "logps/chosen": -284.467041015625,
      "logps/pi_response": -170.04429626464844,
      "logps/ref_response": -71.79032897949219,
      "logps/rejected": -199.2174530029297,
      "loss": 0.6088,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.43174758553504944,
      "rewards/margins": 0.3946196138858795,
      "rewards/rejected": -0.826367199420929,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.528303623199463,
      "logits/rejected": -2.4839463233947754,
      "logps/chosen": -223.5751953125,
      "logps/pi_response": -163.4947967529297,
      "logps/ref_response": -67.7431869506836,
      "logps/rejected": -192.84942626953125,
      "loss": 0.5741,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.37526100873947144,
      "rewards/margins": 0.4467507302761078,
      "rewards/rejected": -0.8220117688179016,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.5618700981140137,
      "logits/rejected": -2.534076690673828,
      "logps/chosen": -274.2109680175781,
      "logps/pi_response": -197.44552612304688,
      "logps/ref_response": -73.56925964355469,
      "logps/rejected": -213.0953369140625,
      "loss": 0.5409,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.4919354319572449,
      "rewards/margins": 0.5938761830329895,
      "rewards/rejected": -1.0858116149902344,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.6031501293182373,
      "logits/rejected": -2.5903992652893066,
      "logps/chosen": -286.1940612792969,
      "logps/pi_response": -206.9901885986328,
      "logps/ref_response": -77.56591033935547,
      "logps/rejected": -233.7861785888672,
      "loss": 0.5351,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.532009482383728,
      "rewards/margins": 0.6012960076332092,
      "rewards/rejected": -1.133305549621582,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.56282377243042,
      "logits/rejected": -2.52233624458313,
      "logps/chosen": -299.3221740722656,
      "logps/pi_response": -189.3115234375,
      "logps/ref_response": -72.58592224121094,
      "logps/rejected": -221.27865600585938,
      "loss": 0.5346,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.4943874776363373,
      "rewards/margins": 0.5152736902236938,
      "rewards/rejected": -1.009661316871643,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.56676983833313,
      "logits/rejected": -2.5280394554138184,
      "logps/chosen": -305.5573425292969,
      "logps/pi_response": -211.8637237548828,
      "logps/ref_response": -74.8798599243164,
      "logps/rejected": -226.0255889892578,
      "loss": 0.5418,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.5523956418037415,
      "rewards/margins": 0.6482929587364197,
      "rewards/rejected": -1.2006886005401611,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.5243890285491943,
      "logits/rejected": -2.4874043464660645,
      "logps/chosen": -248.3354034423828,
      "logps/pi_response": -187.9515380859375,
      "logps/ref_response": -63.39829635620117,
      "logps/rejected": -205.1956329345703,
      "loss": 0.5203,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.5550650358200073,
      "rewards/margins": 0.5441204905509949,
      "rewards/rejected": -1.0991854667663574,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.5527961254119873,
      "logits/rejected": -2.510192394256592,
      "logps/chosen": -268.68243408203125,
      "logps/pi_response": -199.80201721191406,
      "logps/ref_response": -66.3088607788086,
      "logps/rejected": -221.51443481445312,
      "loss": 0.5217,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.4530016779899597,
      "rewards/margins": 0.7860355973243713,
      "rewards/rejected": -1.239037275314331,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.5384299755096436,
      "logits/rejected": -2.5014443397521973,
      "logps/chosen": -301.70086669921875,
      "logps/pi_response": -213.873779296875,
      "logps/ref_response": -75.92735290527344,
      "logps/rejected": -232.5878448486328,
      "loss": 0.5269,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.46107131242752075,
      "rewards/margins": 0.8083871603012085,
      "rewards/rejected": -1.269458532333374,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.5545084476470947,
      "logits/rejected": -2.5045456886291504,
      "logps/chosen": -285.5328063964844,
      "logps/pi_response": -202.77578735351562,
      "logps/ref_response": -78.19523620605469,
      "logps/rejected": -217.67587280273438,
      "loss": 0.5366,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.4644014835357666,
      "rewards/margins": 0.6116248965263367,
      "rewards/rejected": -1.076026439666748,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.5840935707092285,
      "logits/rejected": -2.5227558612823486,
      "logps/chosen": -280.5240173339844,
      "logps/pi_response": -208.884765625,
      "logps/ref_response": -77.39044189453125,
      "logps/rejected": -227.864990234375,
      "loss": 0.5268,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.46861687302589417,
      "rewards/margins": 0.6975622177124023,
      "rewards/rejected": -1.1661790609359741,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.5635039491473504,
      "train_runtime": 4141.088,
      "train_samples_per_second": 4.921,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}