{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.641975308641975,
  "eval_steps": 500,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6172839506172839,
      "grad_norm": 62.88630294799805,
      "learning_rate": 9.916000000000001e-06,
      "loss": 6.6856,
      "step": 50
    },
    {
      "epoch": 1.2345679012345678,
      "grad_norm": 387.6339111328125,
      "learning_rate": 9.818000000000002e-06,
      "loss": 5.5869,
      "step": 100
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 245.18975830078125,
      "learning_rate": 9.718e-06,
      "loss": 5.159,
      "step": 150
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 330.3158264160156,
      "learning_rate": 9.618e-06,
      "loss": 4.0494,
      "step": 200
    },
    {
      "epoch": 3.0864197530864197,
      "grad_norm": 138.14215087890625,
      "learning_rate": 9.518000000000001e-06,
      "loss": 3.6313,
      "step": 250
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 166.69479370117188,
      "learning_rate": 9.418e-06,
      "loss": 3.3391,
      "step": 300
    },
    {
      "epoch": 4.320987654320987,
      "grad_norm": 107.54407501220703,
      "learning_rate": 9.318e-06,
      "loss": 3.3295,
      "step": 350
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 293.8923034667969,
      "learning_rate": 9.218e-06,
      "loss": 3.0956,
      "step": 400
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 226.05360412597656,
      "learning_rate": 9.118000000000001e-06,
      "loss": 3.0271,
      "step": 450
    },
    {
      "epoch": 6.172839506172839,
      "grad_norm": 352.8487854003906,
      "learning_rate": 9.018e-06,
      "loss": 3.0627,
      "step": 500
    },
    {
      "epoch": 6.790123456790123,
      "grad_norm": 97.6641616821289,
      "learning_rate": 8.918000000000002e-06,
      "loss": 3.0312,
      "step": 550
    },
    {
      "epoch": 7.407407407407407,
      "grad_norm": 90.92312622070312,
      "learning_rate": 8.818000000000001e-06,
      "loss": 3.0228,
      "step": 600
    },
    {
      "epoch": 8.024691358024691,
      "grad_norm": 241.7625732421875,
      "learning_rate": 8.718e-06,
      "loss": 3.0873,
      "step": 650
    },
    {
      "epoch": 8.641975308641975,
      "grad_norm": 162.14071655273438,
      "learning_rate": 8.618000000000001e-06,
      "loss": 2.9582,
      "step": 700
    }
  ],
  "logging_steps": 50,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 62,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.675702163456e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}