test-fn-1/last-checkpoint/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.008484162895927601,
  "eval_steps": 500,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.21934866905212402,
      "learning_rate": 0.0002,
      "loss": 2.1352,
      "step": 1
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.37619397044181824,
      "learning_rate": 0.0002,
      "loss": 2.3002,
      "step": 2
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.43210744857788086,
      "learning_rate": 0.0002,
      "loss": 2.041,
      "step": 3
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.4717111885547638,
      "learning_rate": 0.0002,
      "loss": 1.839,
      "step": 4
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.6455919146537781,
      "learning_rate": 0.0002,
      "loss": 1.5939,
      "step": 5
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.2801408767700195,
      "learning_rate": 0.0002,
      "loss": 1.327,
      "step": 6
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.9770981669425964,
      "learning_rate": 0.0002,
      "loss": 1.1536,
      "step": 7
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.177263617515564,
      "learning_rate": 0.0002,
      "loss": 0.9881,
      "step": 8
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.6201061606407166,
      "learning_rate": 0.0002,
      "loss": 0.8609,
      "step": 9
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.45395827293396,
      "learning_rate": 0.0002,
      "loss": 0.8477,
      "step": 10
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.0724296569824219,
      "learning_rate": 0.0002,
      "loss": 0.7573,
      "step": 11
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.9028312563896179,
      "learning_rate": 0.0002,
      "loss": 0.7258,
      "step": 12
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8523911237716675,
      "learning_rate": 0.0002,
      "loss": 0.7513,
      "step": 13
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.6326367855072021,
      "learning_rate": 0.0002,
      "loss": 0.7186,
      "step": 14
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.39301833510398865,
      "learning_rate": 0.0002,
      "loss": 0.706,
      "step": 15
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.5725602507591248,
      "learning_rate": 0.0002,
      "loss": 0.6406,
      "step": 16
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6625002026557922,
      "learning_rate": 0.0002,
      "loss": 0.6349,
      "step": 17
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.19412539899349213,
      "learning_rate": 0.0002,
      "loss": 0.6134,
      "step": 18
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.34864893555641174,
      "learning_rate": 0.0002,
      "loss": 0.6381,
      "step": 19
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.30731046199798584,
      "learning_rate": 0.0002,
      "loss": 0.6553,
      "step": 20
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.1636987179517746,
      "learning_rate": 0.0002,
      "loss": 0.5984,
      "step": 21
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.1931622475385666,
      "learning_rate": 0.0002,
      "loss": 0.5821,
      "step": 22
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.32078325748443604,
      "learning_rate": 0.0002,
      "loss": 0.586,
      "step": 23
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.17550581693649292,
      "learning_rate": 0.0002,
      "loss": 0.5315,
      "step": 24
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.22394584119319916,
      "learning_rate": 0.0002,
      "loss": 0.5415,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 2946,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "total_flos": 6828259445391360.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}