tot_llama_update / checkpoint-1000 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 15.57935735150925,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.31,
"grad_norm": 0.48931118845939636,
"learning_rate": 1.98125e-05,
"loss": 1.7417,
"step": 20
},
{
"epoch": 0.62,
"grad_norm": 0.56327223777771,
"learning_rate": 1.9604166666666668e-05,
"loss": 1.7626,
"step": 40
},
{
"epoch": 0.93,
"grad_norm": 0.6054126620292664,
"learning_rate": 1.9395833333333335e-05,
"loss": 1.5774,
"step": 60
},
{
"epoch": 1.25,
"grad_norm": 0.8035449385643005,
"learning_rate": 1.9187500000000002e-05,
"loss": 1.4772,
"step": 80
},
{
"epoch": 1.56,
"grad_norm": 0.9972027540206909,
"learning_rate": 1.897916666666667e-05,
"loss": 1.409,
"step": 100
},
{
"epoch": 1.87,
"grad_norm": 0.7526600360870361,
"learning_rate": 1.8781250000000003e-05,
"loss": 1.3088,
"step": 120
},
{
"epoch": 2.18,
"grad_norm": 0.8370587825775146,
"learning_rate": 1.8572916666666666e-05,
"loss": 1.2086,
"step": 140
},
{
"epoch": 2.49,
"grad_norm": 0.8337924480438232,
"learning_rate": 1.8364583333333334e-05,
"loss": 1.1321,
"step": 160
},
{
"epoch": 2.8,
"grad_norm": 1.0028570890426636,
"learning_rate": 1.815625e-05,
"loss": 1.0439,
"step": 180
},
{
"epoch": 3.12,
"grad_norm": 0.7663702368736267,
"learning_rate": 1.7947916666666668e-05,
"loss": 1.026,
"step": 200
},
{
"epoch": 3.43,
"grad_norm": 0.8835870623588562,
"learning_rate": 1.7739583333333335e-05,
"loss": 0.9994,
"step": 220
},
{
"epoch": 3.74,
"grad_norm": 1.010624885559082,
"learning_rate": 1.7531250000000003e-05,
"loss": 0.9381,
"step": 240
},
{
"epoch": 4.05,
"grad_norm": 0.9645354151725769,
"learning_rate": 1.7322916666666666e-05,
"loss": 0.8667,
"step": 260
},
{
"epoch": 4.36,
"grad_norm": 0.9926008582115173,
"learning_rate": 1.7114583333333334e-05,
"loss": 0.8729,
"step": 280
},
{
"epoch": 4.67,
"grad_norm": 1.0364482402801514,
"learning_rate": 1.690625e-05,
"loss": 0.836,
"step": 300
},
{
"epoch": 4.99,
"grad_norm": 1.3519001007080078,
"learning_rate": 1.6697916666666668e-05,
"loss": 0.8126,
"step": 320
},
{
"epoch": 5.3,
"grad_norm": 1.2280508279800415,
"learning_rate": 1.6489583333333335e-05,
"loss": 0.7953,
"step": 340
},
{
"epoch": 5.61,
"grad_norm": 0.8931779265403748,
"learning_rate": 1.6281250000000003e-05,
"loss": 0.7813,
"step": 360
},
{
"epoch": 5.92,
"grad_norm": 2.7053027153015137,
"learning_rate": 1.6072916666666667e-05,
"loss": 0.7256,
"step": 380
},
{
"epoch": 6.23,
"grad_norm": 1.411024808883667,
"learning_rate": 1.5864583333333334e-05,
"loss": 0.7336,
"step": 400
},
{
"epoch": 6.54,
"grad_norm": 1.0111807584762573,
"learning_rate": 1.565625e-05,
"loss": 0.7207,
"step": 420
},
{
"epoch": 6.85,
"grad_norm": 2.213623523712158,
"learning_rate": 1.544791666666667e-05,
"loss": 0.664,
"step": 440
},
{
"epoch": 7.17,
"grad_norm": 1.3642323017120361,
"learning_rate": 1.5239583333333334e-05,
"loss": 0.6848,
"step": 460
},
{
"epoch": 7.48,
"grad_norm": 1.3692028522491455,
"learning_rate": 1.5031250000000001e-05,
"loss": 0.6663,
"step": 480
},
{
"epoch": 7.79,
"grad_norm": 1.9850131273269653,
"learning_rate": 1.4822916666666667e-05,
"loss": 0.6199,
"step": 500
},
{
"epoch": 8.1,
"grad_norm": 1.6070563793182373,
"learning_rate": 1.4614583333333334e-05,
"loss": 0.6295,
"step": 520
},
{
"epoch": 8.41,
"grad_norm": 3.226116418838501,
"learning_rate": 1.4406250000000001e-05,
"loss": 0.6227,
"step": 540
},
{
"epoch": 8.72,
"grad_norm": 1.5464348793029785,
"learning_rate": 1.4197916666666667e-05,
"loss": 0.589,
"step": 560
},
{
"epoch": 9.04,
"grad_norm": 1.4290672540664673,
"learning_rate": 1.3989583333333334e-05,
"loss": 0.6263,
"step": 580
},
{
"epoch": 9.35,
"grad_norm": 1.682243824005127,
"learning_rate": 1.3781250000000001e-05,
"loss": 0.5747,
"step": 600
},
{
"epoch": 9.66,
"grad_norm": 1.6785274744033813,
"learning_rate": 1.3572916666666667e-05,
"loss": 0.5809,
"step": 620
},
{
"epoch": 9.97,
"grad_norm": 2.156558036804199,
"learning_rate": 1.3364583333333334e-05,
"loss": 0.5725,
"step": 640
},
{
"epoch": 10.28,
"grad_norm": 1.739134430885315,
"learning_rate": 1.3156250000000001e-05,
"loss": 0.549,
"step": 660
},
{
"epoch": 10.59,
"grad_norm": 1.2140896320343018,
"learning_rate": 1.2947916666666667e-05,
"loss": 0.5493,
"step": 680
},
{
"epoch": 10.91,
"grad_norm": 1.517707109451294,
"learning_rate": 1.2739583333333334e-05,
"loss": 0.5589,
"step": 700
},
{
"epoch": 11.22,
"grad_norm": 1.8752249479293823,
"learning_rate": 1.2531250000000001e-05,
"loss": 0.5001,
"step": 720
},
{
"epoch": 11.53,
"grad_norm": 1.8684614896774292,
"learning_rate": 1.2322916666666667e-05,
"loss": 0.52,
"step": 740
},
{
"epoch": 11.84,
"grad_norm": 2.3552052974700928,
"learning_rate": 1.2114583333333334e-05,
"loss": 0.5479,
"step": 760
},
{
"epoch": 12.15,
"grad_norm": 1.2074153423309326,
"learning_rate": 1.1906250000000001e-05,
"loss": 0.4977,
"step": 780
},
{
"epoch": 12.46,
"grad_norm": 3.0007381439208984,
"learning_rate": 1.1697916666666667e-05,
"loss": 0.5252,
"step": 800
},
{
"epoch": 12.78,
"grad_norm": 1.8956348896026611,
"learning_rate": 1.1489583333333334e-05,
"loss": 0.503,
"step": 820
},
{
"epoch": 13.09,
"grad_norm": 1.3779544830322266,
"learning_rate": 1.1281250000000001e-05,
"loss": 0.4991,
"step": 840
},
{
"epoch": 13.4,
"grad_norm": 1.5404250621795654,
"learning_rate": 1.1072916666666667e-05,
"loss": 0.5026,
"step": 860
},
{
"epoch": 13.71,
"grad_norm": 2.149167060852051,
"learning_rate": 1.0864583333333334e-05,
"loss": 0.4788,
"step": 880
},
{
"epoch": 14.02,
"grad_norm": 1.4978464841842651,
"learning_rate": 1.0656250000000002e-05,
"loss": 0.4605,
"step": 900
},
{
"epoch": 14.33,
"grad_norm": 1.5203664302825928,
"learning_rate": 1.0447916666666667e-05,
"loss": 0.4858,
"step": 920
},
{
"epoch": 14.64,
"grad_norm": 1.850074291229248,
"learning_rate": 1.0239583333333334e-05,
"loss": 0.4797,
"step": 940
},
{
"epoch": 14.96,
"grad_norm": 1.7591063976287842,
"learning_rate": 1.0031250000000002e-05,
"loss": 0.4619,
"step": 960
},
{
"epoch": 15.27,
"grad_norm": 1.4994142055511475,
"learning_rate": 9.822916666666667e-06,
"loss": 0.4627,
"step": 980
},
{
"epoch": 15.58,
"grad_norm": 1.888311743736267,
"learning_rate": 9.614583333333334e-06,
"loss": 0.4469,
"step": 1000
}
],
"logging_steps": 20,
"max_steps": 1920,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"total_flos": 2.5991277871104e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
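
A minimal sketch of reading this state file programmatically, using only the Python standard library. The file path below is an assumption mirroring the repository layout above; every key referenced (log_history, step, epoch, loss, learning_rate, global_step, max_steps, num_train_epochs) appears in the JSON itself.

import json

# Load the Trainer state saved alongside this checkpoint.
# Path mirrors the repo layout above (an assumption about your working directory).
with open("tot_llama_update/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# log_history holds one record per logging event (every logging_steps = 20 steps here).
for record in state["log_history"]:
    print(f"step {record['step']:>4}  epoch {record['epoch']:5.2f}  "
          f"loss {record['loss']:.4f}  lr {record['learning_rate']:.3e}")

# Overall progress: 1000 of 1920 steps, i.e. about 15.6 of the 30 configured epochs.
print(f"{state['global_step']}/{state['max_steps']} steps, "
      f"epoch {state['epoch']:.2f} of {state['num_train_epochs']}")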