Sheared-LLaMA-1.3B-sft-lora / trainer_state.json
Commit: Model save (cfc55a4)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6699636307656782,
"eval_steps": 500,
"global_step": 272,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 3.5714285714285716e-07,
"loss": 1.4263,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.7857142857142859e-06,
"loss": 1.4275,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 3.5714285714285718e-06,
"loss": 1.4298,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 5.357142857142857e-06,
"loss": 1.424,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 7.1428571428571436e-06,
"loss": 1.4395,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 8.92857142857143e-06,
"loss": 1.4306,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 9.998342337571566e-06,
"loss": 1.4306,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 9.97970625586178e-06,
"loss": 1.4257,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 9.940439480455386e-06,
"loss": 1.4302,
"step": 40
},
{
"epoch": 0.11,
"learning_rate": 9.880704691794608e-06,
"loss": 1.4312,
"step": 45
},
{
"epoch": 0.12,
"learning_rate": 9.80074936835801e-06,
"loss": 1.4136,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 9.70090476136855e-06,
"loss": 1.4217,
"step": 55
},
{
"epoch": 0.15,
"learning_rate": 9.581584522435025e-06,
"loss": 1.4138,
"step": 60
},
{
"epoch": 0.16,
"learning_rate": 9.443282989812494e-06,
"loss": 1.4091,
"step": 65
},
{
"epoch": 0.17,
"learning_rate": 9.286573140381663e-06,
"loss": 1.4004,
"step": 70
},
{
"epoch": 0.18,
"learning_rate": 9.112104215832047e-06,
"loss": 1.3973,
"step": 75
},
{
"epoch": 0.2,
"learning_rate": 8.920599032883553e-06,
"loss": 1.3976,
"step": 80
},
{
"epoch": 0.21,
"learning_rate": 8.712850988690094e-06,
"loss": 1.3982,
"step": 85
},
{
"epoch": 0.22,
"learning_rate": 8.489720773831717e-06,
"loss": 1.3978,
"step": 90
},
{
"epoch": 0.23,
"learning_rate": 8.25213280651317e-06,
"loss": 1.3858,
"step": 95
},
{
"epoch": 0.25,
"learning_rate": 8.001071402741843e-06,
"loss": 1.379,
"step": 100
},
{
"epoch": 0.26,
"learning_rate": 7.737576698351878e-06,
"loss": 1.386,
"step": 105
},
{
"epoch": 0.27,
"learning_rate": 7.462740339769323e-06,
"loss": 1.4033,
"step": 110
},
{
"epoch": 0.28,
"learning_rate": 7.177700961371239e-06,
"loss": 1.3782,
"step": 115
},
{
"epoch": 0.3,
"learning_rate": 6.883639468175926e-06,
"loss": 1.3731,
"step": 120
},
{
"epoch": 0.31,
"learning_rate": 6.58177414340781e-06,
"loss": 1.3741,
"step": 125
},
{
"epoch": 0.32,
"learning_rate": 6.273355601206143e-06,
"loss": 1.3714,
"step": 130
},
{
"epoch": 0.33,
"learning_rate": 5.959661605388229e-06,
"loss": 1.3783,
"step": 135
},
{
"epoch": 0.34,
"learning_rate": 5.641991775732756e-06,
"loss": 1.3813,
"step": 140
},
{
"epoch": 0.36,
"learning_rate": 5.321662203714909e-06,
"loss": 1.3619,
"step": 145
},
{
"epoch": 0.37,
"learning_rate": 5e-06,
"loss": 1.3739,
"step": 150
},
{
"epoch": 0.38,
"learning_rate": 4.678337796285093e-06,
"loss": 1.364,
"step": 155
},
{
"epoch": 0.39,
"learning_rate": 4.358008224267245e-06,
"loss": 1.3588,
"step": 160
},
{
"epoch": 0.41,
"learning_rate": 4.040338394611772e-06,
"loss": 1.3539,
"step": 165
},
{
"epoch": 0.42,
"learning_rate": 3.726644398793857e-06,
"loss": 1.3588,
"step": 170
},
{
"epoch": 0.43,
"learning_rate": 3.4182258565921933e-06,
"loss": 1.3632,
"step": 175
},
{
"epoch": 0.44,
"learning_rate": 3.116360531824074e-06,
"loss": 1.3584,
"step": 180
},
{
"epoch": 0.46,
"learning_rate": 2.822299038628762e-06,
"loss": 1.3546,
"step": 185
},
{
"epoch": 0.47,
"learning_rate": 2.537259660230679e-06,
"loss": 1.3628,
"step": 190
},
{
"epoch": 0.48,
"learning_rate": 2.2624233016481224e-06,
"loss": 1.3627,
"step": 195
},
{
"epoch": 0.49,
"learning_rate": 1.9989285972581595e-06,
"loss": 1.3644,
"step": 200
},
{
"epoch": 0.5,
"learning_rate": 1.7478671934868302e-06,
"loss": 1.356,
"step": 205
},
{
"epoch": 0.52,
"learning_rate": 1.5102792261682813e-06,
"loss": 1.3556,
"step": 210
},
{
"epoch": 0.53,
"learning_rate": 1.2871490113099066e-06,
"loss": 1.3545,
"step": 215
},
{
"epoch": 0.54,
"learning_rate": 1.0794009671164484e-06,
"loss": 1.3502,
"step": 220
},
{
"epoch": 0.55,
"learning_rate": 8.878957841679542e-07,
"loss": 1.3656,
"step": 225
},
{
"epoch": 0.57,
"learning_rate": 7.13426859618338e-07,
"loss": 1.356,
"step": 230
},
{
"epoch": 0.58,
"learning_rate": 5.567170101875074e-07,
"loss": 1.3602,
"step": 235
},
{
"epoch": 0.59,
"learning_rate": 4.184154775649768e-07,
"loss": 1.3582,
"step": 240
},
{
"epoch": 0.6,
"learning_rate": 2.990952386314505e-07,
"loss": 1.3545,
"step": 245
},
{
"epoch": 0.62,
"learning_rate": 1.992506316419912e-07,
"loss": 1.3565,
"step": 250
},
{
"epoch": 0.63,
"learning_rate": 1.192953082053927e-07,
"loss": 1.3637,
"step": 255
},
{
"epoch": 0.64,
"learning_rate": 5.9560519544614725e-08,
"loss": 1.3604,
"step": 260
},
{
"epoch": 0.65,
"learning_rate": 2.0293744138219495e-08,
"loss": 1.347,
"step": 265
},
{
"epoch": 0.67,
"learning_rate": 1.657662428434792e-09,
"loss": 1.3556,
"step": 270
},
{
"epoch": 0.67,
"eval_loss": 1.3605483770370483,
"eval_runtime": 2578.3151,
"eval_samples_per_second": 8.963,
"eval_steps_per_second": 1.12,
"step": 272
},
{
"epoch": 0.67,
"step": 272,
"total_flos": 2.233306234264486e+18,
"train_loss": 1.3824449540937649,
"train_runtime": 69463.2349,
"train_samples_per_second": 2.005,
"train_steps_per_second": 0.004
}
],
"logging_steps": 5,
"max_steps": 272,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 2.233306234264486e+18,
"trial_name": null,
"trial_params": null
}
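
The logged learning rates follow a linear warmup into a cosine decay. As a minimal sketch (not the author's training script), the snippet below reconstructs that schedule and compares it against `log_history`. The peak rate of 1e-5 and the 28 warmup steps are inferred from the logged values (step 1 logs 1e-5/28, step 150 logs exactly half the peak), not stated anywhere in the file, so treat them as assumptions; the 272 total steps come from `max_steps`.

```python
import json
import math

# Assumed schedule parameters, inferred from the logged values above.
BASE_LR = 1e-5      # assumed peak learning rate
WARMUP_STEPS = 28   # inferred: step-1 LR equals 1e-5 / 28 (~10% of 272 steps)
TOTAL_STEPS = 272   # matches "max_steps"

def lr_at(step: int) -> float:
    """Linear warmup to BASE_LR, then cosine decay toward 0 over the remaining steps."""
    if step < WARMUP_STEPS:
        return BASE_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return BASE_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

with open("trainer_state.json") as f:
    state = json.load(f)

# Compare the reconstructed schedule against the logged learning rates.
for entry in state["log_history"]:
    if "learning_rate" in entry:
        step = entry["step"]
        print(f"step {step:4d}  logged {entry['learning_rate']:.6e}  "
              f"reconstructed {lr_at(step):.6e}")
```

On the checkpoints logged here the reconstruction matches to the printed precision (e.g. step 150 gives 5e-06 and step 270 gives about 1.658e-09), which is consistent with a cosine schedule after a warmup of roughly 10% of the 272 training steps.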