|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1e-07,
      "logits/chosen": -2.901834726333618,
      "logits/rejected": -2.844017505645752,
      "logps/chosen": -210.10006713867188,
      "logps/pi_response": -112.89700317382812,
      "logps/ref_response": -112.89700317382812,
      "logps/rejected": -243.11831665039062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.83504027183137e-07,
      "logits/chosen": -2.7638163566589355,
      "logits/rejected": -2.704831838607788,
      "logps/chosen": -225.97950744628906,
      "logps/pi_response": -118.01399993896484,
      "logps/ref_response": -116.89596557617188,
      "logps/rejected": -233.20187377929688,
      "loss": 0.6869,
      "rewards/accuracies": 0.5416666865348816,
      "rewards/chosen": -0.027089955285191536,
      "rewards/margins": 0.013482754118740559,
      "rewards/rejected": -0.04057271406054497,
      "step": 10
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.643105808261596e-07,
      "logits/chosen": -2.8008337020874023,
      "logits/rejected": -2.774320125579834,
      "logps/chosen": -251.0797882080078,
      "logps/pi_response": -113.0524673461914,
      "logps/ref_response": -113.1565170288086,
      "logps/rejected": -278.3044128417969,
      "loss": 0.639,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.14624103903770447,
      "rewards/margins": 0.18217135965824127,
      "rewards/rejected": -0.32841241359710693,
      "step": 20
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8676665440207977e-07,
      "logits/chosen": -2.711427688598633,
      "logits/rejected": -2.6692211627960205,
      "logps/chosen": -271.66339111328125,
      "logps/pi_response": -147.83273315429688,
      "logps/ref_response": -125.85347747802734,
      "logps/rejected": -317.5698547363281,
      "loss": 0.5981,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.38658708333969116,
      "rewards/margins": 0.37116459012031555,
      "rewards/rejected": -0.7577516436576843,
      "step": 30
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1500545527530544e-08,
      "logits/chosen": -2.7132654190063477,
      "logits/rejected": -2.6600053310394287,
      "logps/chosen": -278.91387939453125,
      "logps/pi_response": -148.5419158935547,
      "logps/ref_response": -117.31595611572266,
      "logps/rejected": -345.1435852050781,
      "loss": 0.5877,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.4909982681274414,
      "rewards/margins": 0.4453675150871277,
      "rewards/rejected": -0.9363657236099243,
      "step": 40
    },
    {
      "epoch": 1.0,
      "step": 48,
      "total_flos": 0.0,
      "train_loss": 0.6218686103820801,
      "train_runtime": 2894.9247,
      "train_samples_per_second": 4.224,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}