{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010416666666666666,
"grad_norm": 0.6369225577240899,
"learning_rate": 4.4661867031221926e-08,
"logits/chosen": -2.590585231781006,
"logits/rejected": -2.5664222240448,
"logps/chosen": -80.29847717285156,
"logps/rejected": -53.10200881958008,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.10416666666666667,
"grad_norm": 0.5699844472823067,
"learning_rate": 4.4661867031221926e-07,
"logits/chosen": -2.5559396743774414,
"logits/rejected": -2.5378711223602295,
"logps/chosen": -87.90701293945312,
"logps/rejected": -81.000732421875,
"loss": 0.6931,
"rewards/accuracies": 0.1944444477558136,
"rewards/chosen": 7.878588803578168e-06,
"rewards/margins": -2.865187343559228e-05,
"rewards/rejected": 3.653046223917045e-05,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.5220617909669454,
"learning_rate": 8.932373406244385e-07,
"logits/chosen": -2.6017892360687256,
"logits/rejected": -2.5524039268493652,
"logps/chosen": -102.29816436767578,
"logps/rejected": -88.93941497802734,
"loss": 0.6929,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 0.0010181397665292025,
"rewards/margins": 0.00010430384281789884,
"rewards/rejected": 0.0009138360619544983,
"step": 20
},
{
"epoch": 0.3125,
"grad_norm": 0.8025622814810965,
"learning_rate": 8.85808141328471e-07,
"logits/chosen": -2.4462294578552246,
"logits/rejected": -2.4619638919830322,
"logps/chosen": -66.71989440917969,
"logps/rejected": -76.12188720703125,
"loss": 0.6922,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 0.0006937496364116669,
"rewards/margins": 0.001850384520366788,
"rewards/rejected": -0.0011566350003704429,
"step": 30
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.8299819796102439,
"learning_rate": 8.63767702844543e-07,
"logits/chosen": -2.4784650802612305,
"logits/rejected": -2.4647819995880127,
"logps/chosen": -74.95284271240234,
"logps/rejected": -78.62578582763672,
"loss": 0.6904,
"rewards/accuracies": 0.28125,
"rewards/chosen": -0.002713928697630763,
"rewards/margins": 0.004938088357448578,
"rewards/rejected": -0.007652017287909985,
"step": 40
},
{
"epoch": 0.5208333333333334,
"grad_norm": 1.7129675129738506,
"learning_rate": 8.278492807257787e-07,
"logits/chosen": -2.275789260864258,
"logits/rejected": -2.291548252105713,
"logps/chosen": -55.84526824951172,
"logps/rejected": -68.4447250366211,
"loss": 0.6892,
"rewards/accuracies": 0.20000000298023224,
"rewards/chosen": -0.004781405441462994,
"rewards/margins": 0.004440293647348881,
"rewards/rejected": -0.009221700020134449,
"step": 50
},
{
"epoch": 0.625,
"grad_norm": 1.6503123985933317,
"learning_rate": 7.792478322547763e-07,
"logits/chosen": -2.007650136947632,
"logits/rejected": -1.9700886011123657,
"logps/chosen": -114.91951751708984,
"logps/rejected": -133.2468719482422,
"loss": 0.6865,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": -0.03467016667127609,
"rewards/margins": 0.017464537173509598,
"rewards/rejected": -0.05213470384478569,
"step": 60
},
{
"epoch": 0.7291666666666666,
"grad_norm": 2.2984298301869166,
"learning_rate": 7.195802618323811e-07,
"logits/chosen": -0.7167578935623169,
"logits/rejected": -0.5914499163627625,
"logps/chosen": -169.179443359375,
"logps/rejected": -187.8536376953125,
"loss": 0.6802,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": -0.07043690979480743,
"rewards/margins": 0.02859547734260559,
"rewards/rejected": -0.09903239458799362,
"step": 70
},
{
"epoch": 0.8333333333333334,
"grad_norm": 7.779524343348389,
"learning_rate": 6.508316287570068e-07,
"logits/chosen": 0.3923811912536621,
"logits/rejected": 0.710164487361908,
"logps/chosen": -222.629638671875,
"logps/rejected": -283.06634521484375,
"loss": 0.669,
"rewards/accuracies": 0.3687500059604645,
"rewards/chosen": -0.13247501850128174,
"rewards/margins": 0.06450365483760834,
"rewards/rejected": -0.19697865843772888,
"step": 80
},
{
"epoch": 0.9375,
"grad_norm": 6.759384309065668,
"learning_rate": 5.752891069888697e-07,
"logits/chosen": 1.1880756616592407,
"logits/rejected": 1.3526252508163452,
"logps/chosen": -208.9159393310547,
"logps/rejected": -350.1947021484375,
"loss": 0.6679,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": -0.14983566105365753,
"rewards/margins": 0.12745192646980286,
"rewards/rejected": -0.2772876024246216,
"step": 90
},
{
"epoch": 1.0416666666666667,
"grad_norm": 6.64561814006091,
"learning_rate": 4.954658939683863e-07,
"logits/chosen": 1.6723073720932007,
"logits/rejected": 2.053896427154541,
"logps/chosen": -224.61581420898438,
"logps/rejected": -340.3245849609375,
"loss": 0.66,
"rewards/accuracies": 0.35624998807907104,
"rewards/chosen": -0.14611998200416565,
"rewards/margins": 0.11334720999002457,
"rewards/rejected": -0.2594671845436096,
"step": 100
},
{
"epoch": 1.0416666666666667,
"eval_logits/chosen": 2.222659111022949,
"eval_logits/rejected": 2.3998076915740967,
"eval_logps/chosen": -231.40858459472656,
"eval_logps/rejected": -327.22540283203125,
"eval_loss": 0.6709679961204529,
"eval_rewards/accuracies": 0.3154761791229248,
"eval_rewards/chosen": -0.15175169706344604,
"eval_rewards/margins": 0.08624394983053207,
"eval_rewards/rejected": -0.2379956692457199,
"eval_runtime": 113.6716,
"eval_samples_per_second": 17.595,
"eval_steps_per_second": 0.554,
"step": 100
},
{
"epoch": 1.1458333333333333,
"grad_norm": 10.14024135671265,
"learning_rate": 4.140175999393427e-07,
"logits/chosen": 2.7885522842407227,
"logits/rejected": 2.5952858924865723,
"logps/chosen": -217.1932830810547,
"logps/rejected": -423.316162109375,
"loss": 0.6344,
"rewards/accuracies": 0.375,
"rewards/chosen": -0.14682665467262268,
"rewards/margins": 0.18375235795974731,
"rewards/rejected": -0.3305789828300476,
"step": 110
},
{
"epoch": 1.25,
"grad_norm": 5.687581380009954,
"learning_rate": 3.3365389939087466e-07,
"logits/chosen": 2.281104564666748,
"logits/rejected": 2.7267863750457764,
"logps/chosen": -382.77508544921875,
"logps/rejected": -542.299560546875,
"loss": 0.6531,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.2657240629196167,
"rewards/margins": 0.1684594750404358,
"rewards/rejected": -0.43418359756469727,
"step": 120
},
{
"epoch": 1.3541666666666667,
"grad_norm": 8.59716391066745,
"learning_rate": 2.5704838385518876e-07,
"logits/chosen": 2.97074556350708,
"logits/rejected": 3.06956148147583,
"logps/chosen": -262.03167724609375,
"logps/rejected": -430.7930603027344,
"loss": 0.628,
"rewards/accuracies": 0.36250001192092896,
"rewards/chosen": -0.16968943178653717,
"rewards/margins": 0.16052642464637756,
"rewards/rejected": -0.3302158713340759,
"step": 130
},
{
"epoch": 1.4583333333333333,
"grad_norm": 11.108890574756282,
"learning_rate": 1.8674961513641657e-07,
"logits/chosen": 3.3152003288269043,
"logits/rejected": 3.317988634109497,
"logps/chosen": -273.95831298828125,
"logps/rejected": -546.5203247070312,
"loss": 0.6343,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": -0.21727445721626282,
"rewards/margins": 0.2482767552137375,
"rewards/rejected": -0.4655512273311615,
"step": 140
},
{
"epoch": 1.5625,
"grad_norm": 6.0584348916117925,
"learning_rate": 1.250963381092756e-07,
"logits/chosen": 3.313689708709717,
"logits/rejected": 3.6598522663116455,
"logps/chosen": -273.2975769042969,
"logps/rejected": -551.4356689453125,
"loss": 0.6207,
"rewards/accuracies": 0.38749998807907104,
"rewards/chosen": -0.19452792406082153,
"rewards/margins": 0.2676007151603699,
"rewards/rejected": -0.4621286988258362,
"step": 150
},
{
"epoch": 1.6666666666666665,
"grad_norm": 11.564568090628018,
"learning_rate": 7.413967384294965e-08,
"logits/chosen": 3.5584030151367188,
"logits/rejected": 3.723942279815674,
"logps/chosen": -357.8267517089844,
"logps/rejected": -687.1414794921875,
"loss": 0.6181,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -0.2677861154079437,
"rewards/margins": 0.31169238686561584,
"rewards/rejected": -0.5794785022735596,
"step": 160
},
{
"epoch": 1.7708333333333335,
"grad_norm": 12.348100258027504,
"learning_rate": 3.557488157963567e-08,
"logits/chosen": 4.085668087005615,
"logits/rejected": 4.25767183303833,
"logps/chosen": -461.731689453125,
"logps/rejected": -780.9080810546875,
"loss": 0.6175,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -0.3737877607345581,
"rewards/margins": 0.30115869641304016,
"rewards/rejected": -0.6749464273452759,
"step": 170
},
{
"epoch": 1.875,
"grad_norm": 14.884569591942372,
"learning_rate": 1.0684959754358576e-08,
"logits/chosen": 3.8572006225585938,
"logits/rejected": 4.063995361328125,
"logps/chosen": -347.04864501953125,
"logps/rejected": -606.2640380859375,
"loss": 0.617,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -0.26613375544548035,
"rewards/margins": 0.248983234167099,
"rewards/rejected": -0.5151170492172241,
"step": 180
},
{
"epoch": 1.9791666666666665,
"grad_norm": 7.915820342970624,
"learning_rate": 2.979623737803134e-10,
"logits/chosen": 3.7023074626922607,
"logits/rejected": 3.886380434036255,
"logps/chosen": -341.0790710449219,
"logps/rejected": -703.4373779296875,
"loss": 0.6085,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": -0.24430128931999207,
"rewards/margins": 0.3483408987522125,
"rewards/rejected": -0.5926421880722046,
"step": 190
},
{
"epoch": 2.0,
"step": 192,
"total_flos": 0.0,
"train_loss": 0.0,
"train_runtime": 0.1474,
"train_samples_per_second": 82937.448,
"train_steps_per_second": 1302.469
}
],
"logging_steps": 10,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
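
For reference, here is a minimal Python sketch (not part of this checkpoint) of one way to read the trainer_state.json above and print the logged DPO reward margins per optimizer step. The file path and the choice of fields to display are illustrative assumptions, not something prescribed by the checkpoint itself.

import json

# Load the trainer state shown above (the path is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "rewards/margins"; evaluation entries use "eval_*" keys
# and the final summary entry has neither, so both are filtered out here.
train_logs = [e for e in state["log_history"] if "rewards/margins" in e]

for entry in train_logs:
    print(f'step {entry["step"]:>3}: loss={entry["loss"]:.4f}, '
          f'reward margin={entry["rewards/margins"]:+.4f}')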