{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9965156794425087,
  "eval_steps": 50,
  "global_step": 143,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06968641114982578,
      "grad_norm": 7.942402444587964,
      "learning_rate": 3.333333333333333e-07,
      "logits/chosen": -2.747418165206909,
      "logits/rejected": -2.7369940280914307,
      "logps/chosen": -291.8316650390625,
      "logps/rejected": -281.5037536621094,
      "loss": 0.693,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 0.0003662299714051187,
      "rewards/margins": 0.0004625393485184759,
      "rewards/rejected": -9.630931162973866e-05,
      "step": 10
    },
    {
      "epoch": 0.13937282229965156,
      "grad_norm": 8.199051257817597,
      "learning_rate": 4.981198836496775e-07,
      "logits/chosen": -2.7628424167633057,
      "logits/rejected": -2.7486491203308105,
      "logps/chosen": -298.9639587402344,
      "logps/rejected": -320.4031066894531,
      "loss": 0.6901,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.008746958337724209,
      "rewards/margins": 0.006991321686655283,
      "rewards/rejected": 0.0017556389793753624,
      "step": 20
    },
    {
      "epoch": 0.20905923344947736,
      "grad_norm": 8.009396280118287,
      "learning_rate": 4.832481997086846e-07,
      "logits/chosen": -2.713383436203003,
      "logits/rejected": -2.714205503463745,
      "logps/chosen": -296.2840881347656,
      "logps/rejected": -289.38421630859375,
      "loss": 0.681,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.04019603878259659,
      "rewards/margins": 0.04010898619890213,
      "rewards/rejected": 8.704829815542325e-05,
      "step": 30
    },
    {
      "epoch": 0.2787456445993031,
      "grad_norm": 8.916455655557288,
      "learning_rate": 4.543962032878959e-07,
      "logits/chosen": -2.679121732711792,
      "logits/rejected": -2.674286365509033,
      "logps/chosen": -298.8094177246094,
      "logps/rejected": -318.2736511230469,
      "loss": 0.6637,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.003670867532491684,
      "rewards/margins": 0.06940315663814545,
      "rewards/rejected": -0.07307400554418564,
      "step": 40
    },
    {
      "epoch": 0.34843205574912894,
      "grad_norm": 11.007582392023577,
      "learning_rate": 4.1329321073844413e-07,
      "logits/chosen": -2.5453474521636963,
      "logits/rejected": -2.553844451904297,
      "logps/chosen": -272.39227294921875,
      "logps/rejected": -269.97125244140625,
      "loss": 0.6597,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.1337047815322876,
      "rewards/margins": 0.06374003738164902,
      "rewards/rejected": -0.1974448263645172,
      "step": 50
    },
    {
      "epoch": 0.34843205574912894,
      "eval_logits/chosen": -2.5355470180511475,
      "eval_logits/rejected": -2.496910572052002,
      "eval_logps/chosen": -273.103515625,
      "eval_logps/rejected": -285.6536865234375,
      "eval_loss": 0.6512949466705322,
      "eval_rewards/accuracies": 0.6640625,
      "eval_rewards/chosen": -0.1047358363866806,
      "eval_rewards/margins": 0.1251804083585739,
      "eval_rewards/rejected": -0.22991624474525452,
      "eval_runtime": 105.224,
      "eval_samples_per_second": 19.007,
      "eval_steps_per_second": 0.304,
      "step": 50
    },
    {
      "epoch": 0.4181184668989547,
      "grad_norm": 11.130762557977343,
      "learning_rate": 3.624028324136517e-07,
      "logits/chosen": -2.5859858989715576,
      "logits/rejected": -2.590031147003174,
      "logps/chosen": -312.9039611816406,
      "logps/rejected": -316.5061950683594,
      "loss": 0.6515,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.10166505724191666,
      "rewards/margins": 0.13827314972877502,
      "rewards/rejected": -0.23993822932243347,
      "step": 60
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 10.367706676784401,
      "learning_rate": 3.047753100392174e-07,
      "logits/chosen": -2.515353202819824,
      "logits/rejected": -2.5070269107818604,
      "logps/chosen": -303.077392578125,
      "logps/rejected": -301.082763671875,
      "loss": 0.6453,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.2464326173067093,
      "rewards/margins": 0.14911559224128723,
      "rewards/rejected": -0.3955482840538025,
      "step": 70
    },
    {
      "epoch": 0.5574912891986062,
      "grad_norm": 10.328785377431698,
      "learning_rate": 2.4386469286927194e-07,
      "logits/chosen": -2.534677028656006,
      "logits/rejected": -2.515333652496338,
      "logps/chosen": -305.86944580078125,
      "logps/rejected": -315.22369384765625,
      "loss": 0.6525,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3514646887779236,
      "rewards/margins": 0.10919757932424545,
      "rewards/rejected": -0.460662305355072,
      "step": 80
    },
    {
      "epoch": 0.627177700348432,
      "grad_norm": 10.256439047308396,
      "learning_rate": 1.8332181063127542e-07,
      "logits/chosen": -2.4058992862701416,
      "logits/rejected": -2.4308979511260986,
      "logps/chosen": -272.29986572265625,
      "logps/rejected": -304.98779296875,
      "loss": 0.6315,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.19782552123069763,
      "rewards/margins": 0.18659016489982605,
      "rewards/rejected": -0.38441571593284607,
      "step": 90
    },
    {
      "epoch": 0.6968641114982579,
      "grad_norm": 11.456138890836337,
      "learning_rate": 1.26775451942554e-07,
      "logits/chosen": -2.4451889991760254,
      "logits/rejected": -2.447258472442627,
      "logps/chosen": -332.0220031738281,
      "logps/rejected": -343.72979736328125,
      "loss": 0.6383,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.2864390015602112,
      "rewards/margins": 0.22521762549877167,
      "rewards/rejected": -0.511656641960144,
      "step": 100
    },
    {
      "epoch": 0.6968641114982579,
      "eval_logits/chosen": -2.393218517303467,
      "eval_logits/rejected": -2.3483967781066895,
      "eval_logps/chosen": -295.345947265625,
      "eval_logps/rejected": -318.13665771484375,
      "eval_loss": 0.6311340928077698,
      "eval_rewards/accuracies": 0.6875,
      "eval_rewards/chosen": -0.3271602392196655,
      "eval_rewards/margins": 0.22758512198925018,
      "eval_rewards/rejected": -0.5547453165054321,
      "eval_runtime": 103.8391,
      "eval_samples_per_second": 19.261,
      "eval_steps_per_second": 0.308,
      "step": 100
    },
    {
      "epoch": 0.7665505226480837,
      "grad_norm": 15.049705548114918,
      "learning_rate": 7.761486381573326e-08,
      "logits/chosen": -2.472832679748535,
      "logits/rejected": -2.4680256843566895,
      "logps/chosen": -347.2950439453125,
      "logps/rejected": -383.56903076171875,
      "loss": 0.6223,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.36344587802886963,
      "rewards/margins": 0.232518270611763,
      "rewards/rejected": -0.5959641933441162,
      "step": 110
    },
    {
      "epoch": 0.8362369337979094,
      "grad_norm": 11.65420047853757,
      "learning_rate": 3.878660868757322e-08,
      "logits/chosen": -2.3938543796539307,
      "logits/rejected": -2.3905398845672607,
      "logps/chosen": -329.33978271484375,
      "logps/rejected": -346.9275817871094,
      "loss": 0.6335,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.34853675961494446,
      "rewards/margins": 0.21830621361732483,
      "rewards/rejected": -0.5668429136276245,
      "step": 120
    },
    {
      "epoch": 0.9059233449477352,
      "grad_norm": 11.07371443957527,
      "learning_rate": 1.261795485174083e-08,
      "logits/chosen": -2.4500081539154053,
      "logits/rejected": -2.451570510864258,
      "logps/chosen": -330.7276306152344,
      "logps/rejected": -342.2732849121094,
      "loss": 0.626,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.3020505905151367,
      "rewards/margins": 0.23814749717712402,
      "rewards/rejected": -0.540198028087616,
      "step": 130
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 11.70940252198527,
      "learning_rate": 6.773858303274482e-10,
      "logits/chosen": -2.421135425567627,
      "logits/rejected": -2.4089722633361816,
      "logps/chosen": -300.1723937988281,
      "logps/rejected": -315.6776123046875,
      "loss": 0.6255,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.36181899905204773,
      "rewards/margins": 0.13425084948539734,
      "rewards/rejected": -0.49606984853744507,
      "step": 140
    },
    {
      "epoch": 0.9965156794425087,
      "step": 143,
      "total_flos": 0.0,
      "train_loss": 0.6505647914392965,
      "train_runtime": 3852.398,
      "train_samples_per_second": 4.761,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 143,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}