{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 27.160493827160494,
  "eval_steps": 500,
  "global_step": 2200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6172839506172839,
      "grad_norm": 14.113041877746582,
      "learning_rate": 9.953000000000001e-06,
      "loss": 9.3136,
      "step": 50
    },
    {
      "epoch": 1.2345679012345678,
      "grad_norm": 11.032143592834473,
      "learning_rate": 9.903e-06,
      "loss": 8.8439,
      "step": 100
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 21.83379554748535,
      "learning_rate": 9.853e-06,
      "loss": 8.9582,
      "step": 150
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 13.823728561401367,
      "learning_rate": 9.803e-06,
      "loss": 8.5429,
      "step": 200
    },
    {
      "epoch": 3.0864197530864197,
      "grad_norm": 14.090177536010742,
      "learning_rate": 9.753e-06,
      "loss": 8.0873,
      "step": 250
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 16.695327758789062,
      "learning_rate": 9.703000000000002e-06,
      "loss": 7.5718,
      "step": 300
    },
    {
      "epoch": 4.320987654320987,
      "grad_norm": 12.39268970489502,
      "learning_rate": 9.653e-06,
      "loss": 7.2107,
      "step": 350
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 20.64848518371582,
      "learning_rate": 9.603000000000001e-06,
      "loss": 6.633,
      "step": 400
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 10.930610656738281,
      "learning_rate": 9.553000000000002e-06,
      "loss": 6.3085,
      "step": 450
    },
    {
      "epoch": 6.172839506172839,
      "grad_norm": 7.421260833740234,
      "learning_rate": 9.503e-06,
      "loss": 5.9761,
      "step": 500
    },
    {
      "epoch": 6.790123456790123,
      "grad_norm": 19.653493881225586,
      "learning_rate": 9.453e-06,
      "loss": 6.0252,
      "step": 550
    },
    {
      "epoch": 7.407407407407407,
      "grad_norm": 10.195016860961914,
      "learning_rate": 9.403000000000001e-06,
      "loss": 5.9531,
      "step": 600
    },
    {
      "epoch": 8.024691358024691,
      "grad_norm": 15.588345527648926,
      "learning_rate": 9.353000000000002e-06,
      "loss": 5.979,
      "step": 650
    },
    {
      "epoch": 8.641975308641975,
      "grad_norm": 11.010184288024902,
      "learning_rate": 9.303e-06,
      "loss": 5.9684,
      "step": 700
    },
    {
      "epoch": 9.25925925925926,
      "grad_norm": 12.536541938781738,
      "learning_rate": 9.253000000000001e-06,
      "loss": 6.0309,
      "step": 750
    },
    {
      "epoch": 9.876543209876543,
      "grad_norm": 10.15280818939209,
      "learning_rate": 9.203000000000002e-06,
      "loss": 5.7873,
      "step": 800
    },
    {
      "epoch": 10.493827160493828,
      "grad_norm": 18.81814956665039,
      "learning_rate": 9.153e-06,
      "loss": 5.7782,
      "step": 850
    },
    {
      "epoch": 11.11111111111111,
      "grad_norm": 21.824844360351562,
      "learning_rate": 9.103e-06,
      "loss": 5.9619,
      "step": 900
    },
    {
      "epoch": 11.728395061728396,
      "grad_norm": 14.213790893554688,
      "learning_rate": 9.053000000000001e-06,
      "loss": 5.7755,
      "step": 950
    },
    {
      "epoch": 12.345679012345679,
      "grad_norm": 16.542211532592773,
      "learning_rate": 9.003e-06,
      "loss": 5.837,
      "step": 1000
    },
    {
      "epoch": 12.962962962962964,
      "grad_norm": 20.188156127929688,
      "learning_rate": 8.953e-06,
      "loss": 5.7611,
      "step": 1050
    },
    {
      "epoch": 13.580246913580247,
      "grad_norm": 14.349571228027344,
      "learning_rate": 8.903000000000001e-06,
      "loss": 5.8904,
      "step": 1100
    },
    {
      "epoch": 14.197530864197532,
      "grad_norm": 22.795913696289062,
      "learning_rate": 8.853e-06,
      "loss": 5.7211,
      "step": 1150
    },
    {
      "epoch": 14.814814814814815,
      "grad_norm": 10.970526695251465,
      "learning_rate": 8.803e-06,
      "loss": 5.7186,
      "step": 1200
    },
    {
      "epoch": 15.432098765432098,
      "grad_norm": 22.462017059326172,
      "learning_rate": 8.753e-06,
      "loss": 5.6172,
      "step": 1250
    },
    {
      "epoch": 16.049382716049383,
      "grad_norm": 15.98877239227295,
      "learning_rate": 8.703e-06,
      "loss": 5.7045,
      "step": 1300
    },
    {
      "epoch": 16.666666666666668,
      "grad_norm": 10.7304048538208,
      "learning_rate": 8.653e-06,
      "loss": 5.77,
      "step": 1350
    },
    {
      "epoch": 17.28395061728395,
      "grad_norm": 14.929125785827637,
      "learning_rate": 8.603e-06,
      "loss": 5.6916,
      "step": 1400
    },
    {
      "epoch": 17.901234567901234,
      "grad_norm": 12.337247848510742,
      "learning_rate": 8.553000000000001e-06,
      "loss": 5.6017,
      "step": 1450
    },
    {
      "epoch": 18.51851851851852,
      "grad_norm": 10.826752662658691,
      "learning_rate": 8.503e-06,
      "loss": 5.5774,
      "step": 1500
    },
    {
      "epoch": 19.135802469135804,
      "grad_norm": 31.670007705688477,
      "learning_rate": 8.453000000000002e-06,
      "loss": 5.6931,
      "step": 1550
    },
    {
      "epoch": 19.753086419753085,
      "grad_norm": 11.033102989196777,
      "learning_rate": 8.403e-06,
      "loss": 5.6504,
      "step": 1600
    },
    {
      "epoch": 20.37037037037037,
      "grad_norm": 12.53508472442627,
      "learning_rate": 8.353000000000001e-06,
      "loss": 5.7015,
      "step": 1650
    },
    {
      "epoch": 20.987654320987655,
      "grad_norm": 13.549042701721191,
      "learning_rate": 8.304e-06,
      "loss": 5.6826,
      "step": 1700
    },
    {
      "epoch": 21.604938271604937,
      "grad_norm": 13.270482063293457,
      "learning_rate": 8.254000000000001e-06,
      "loss": 5.5371,
      "step": 1750
    },
    {
      "epoch": 22.22222222222222,
      "grad_norm": 15.088618278503418,
      "learning_rate": 8.204000000000001e-06,
      "loss": 5.6817,
      "step": 1800
    },
    {
      "epoch": 22.839506172839506,
      "grad_norm": 12.706061363220215,
      "learning_rate": 8.154e-06,
      "loss": 5.6142,
      "step": 1850
    },
    {
      "epoch": 23.45679012345679,
      "grad_norm": 12.286702156066895,
      "learning_rate": 8.104e-06,
      "loss": 5.4426,
      "step": 1900
    },
    {
      "epoch": 24.074074074074073,
      "grad_norm": 19.067840576171875,
      "learning_rate": 8.054000000000001e-06,
      "loss": 5.5586,
      "step": 1950
    },
    {
      "epoch": 24.691358024691358,
      "grad_norm": 17.961570739746094,
      "learning_rate": 8.004e-06,
      "loss": 5.5784,
      "step": 2000
    },
    {
      "epoch": 25.308641975308642,
      "grad_norm": 15.893245697021484,
      "learning_rate": 7.954e-06,
      "loss": 5.4392,
      "step": 2050
    },
    {
      "epoch": 25.925925925925927,
      "grad_norm": 20.282608032226562,
      "learning_rate": 7.904000000000001e-06,
      "loss": 5.6877,
      "step": 2100
    },
    {
      "epoch": 26.54320987654321,
      "grad_norm": 10.545083045959473,
      "learning_rate": 7.854e-06,
      "loss": 5.5013,
      "step": 2150
    },
    {
      "epoch": 27.160493827160494,
      "grad_norm": 14.949348449707031,
      "learning_rate": 7.804e-06,
      "loss": 5.5302,
      "step": 2200
    }
  ],
  "logging_steps": 50,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 124,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.409349656576e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}