{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9952941176470587,
"eval_steps": 500,
"global_step": 424,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023529411764705882,
"grad_norm": 1.9443631172180176,
"learning_rate": 4.998284588246634e-05,
"loss": 1.5607,
"num_input_tokens_seen": 47376,
"step": 5
},
{
"epoch": 0.047058823529411764,
"grad_norm": 1.132569670677185,
"learning_rate": 4.9931407070965254e-05,
"loss": 0.9492,
"num_input_tokens_seen": 97888,
"step": 10
},
{
"epoch": 0.07058823529411765,
"grad_norm": 0.9147533178329468,
"learning_rate": 4.984575415649019e-05,
"loss": 0.7785,
"num_input_tokens_seen": 145664,
"step": 15
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.8164705038070679,
"learning_rate": 4.97260046830541e-05,
"loss": 0.6287,
"num_input_tokens_seen": 192544,
"step": 20
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.781484842300415,
"learning_rate": 4.957232298638036e-05,
"loss": 0.6056,
"num_input_tokens_seen": 241760,
"step": 25
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.757462203502655,
"learning_rate": 4.9384919968379945e-05,
"loss": 0.5574,
"num_input_tokens_seen": 289600,
"step": 30
},
{
"epoch": 0.16470588235294117,
"grad_norm": 0.7582159042358398,
"learning_rate": 4.916405280772462e-05,
"loss": 0.5507,
"num_input_tokens_seen": 340816,
"step": 35
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.8797752857208252,
"learning_rate": 4.891002460691306e-05,
"loss": 0.5484,
"num_input_tokens_seen": 389424,
"step": 40
},
{
"epoch": 0.21176470588235294,
"grad_norm": 0.726173460483551,
"learning_rate": 4.862318397631434e-05,
"loss": 0.5035,
"num_input_tokens_seen": 438224,
"step": 45
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.9025217294692993,
"learning_rate": 4.83039245557597e-05,
"loss": 0.4869,
"num_input_tokens_seen": 486496,
"step": 50
},
{
"epoch": 0.25882352941176473,
"grad_norm": 0.75713711977005,
"learning_rate": 4.795268447433907e-05,
"loss": 0.4514,
"num_input_tokens_seen": 537520,
"step": 55
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.7639597654342651,
"learning_rate": 4.756994574914359e-05,
"loss": 0.4444,
"num_input_tokens_seen": 584288,
"step": 60
},
{
"epoch": 0.3058823529411765,
"grad_norm": 0.8081209063529968,
"learning_rate": 4.715623362377939e-05,
"loss": 0.4459,
"num_input_tokens_seen": 633984,
"step": 65
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.8182061910629272,
"learning_rate": 4.6712115847560355e-05,
"loss": 0.421,
"num_input_tokens_seen": 685600,
"step": 70
},
{
"epoch": 0.35294117647058826,
"grad_norm": 0.9124415516853333,
"learning_rate": 4.6238201896369055e-05,
"loss": 0.4397,
"num_input_tokens_seen": 734496,
"step": 75
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.855204701423645,
"learning_rate": 4.573514213625505e-05,
"loss": 0.4105,
"num_input_tokens_seen": 784720,
"step": 80
},
{
"epoch": 0.4,
"grad_norm": 0.9326411485671997,
"learning_rate": 4.5203626930918455e-05,
"loss": 0.4127,
"num_input_tokens_seen": 837808,
"step": 85
},
{
"epoch": 0.4235294117647059,
"grad_norm": 1.0282217264175415,
"learning_rate": 4.464438569430354e-05,
"loss": 0.4347,
"num_input_tokens_seen": 888032,
"step": 90
},
{
"epoch": 0.4470588235294118,
"grad_norm": 0.940098762512207,
"learning_rate": 4.40581858896025e-05,
"loss": 0.4226,
"num_input_tokens_seen": 938032,
"step": 95
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.9014526009559631,
"learning_rate": 4.344583197604318e-05,
"loss": 0.3822,
"num_input_tokens_seen": 988176,
"step": 100
},
{
"epoch": 0.49411764705882355,
"grad_norm": 1.1154155731201172,
"learning_rate": 4.2808164304906026e-05,
"loss": 0.4146,
"num_input_tokens_seen": 1040016,
"step": 105
},
{
"epoch": 0.5176470588235295,
"grad_norm": 1.0546766519546509,
"learning_rate": 4.214605796628527e-05,
"loss": 0.4025,
"num_input_tokens_seen": 1090624,
"step": 110
},
{
"epoch": 0.5411764705882353,
"grad_norm": 0.9450793862342834,
"learning_rate": 4.14604215881771e-05,
"loss": 0.3959,
"num_input_tokens_seen": 1143280,
"step": 115
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.9457511901855469,
"learning_rate": 4.075219608954278e-05,
"loss": 0.3886,
"num_input_tokens_seen": 1191616,
"step": 120
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.9637866616249084,
"learning_rate": 4.00223533890578e-05,
"loss": 0.3897,
"num_input_tokens_seen": 1240144,
"step": 125
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.9130258560180664,
"learning_rate": 3.927189507131938e-05,
"loss": 0.3861,
"num_input_tokens_seen": 1288912,
"step": 130
},
{
"epoch": 0.6352941176470588,
"grad_norm": 0.9221341013908386,
"learning_rate": 3.8501851012342446e-05,
"loss": 0.3885,
"num_input_tokens_seen": 1335472,
"step": 135
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.8792950510978699,
"learning_rate": 3.7713277966230514e-05,
"loss": 0.3737,
"num_input_tokens_seen": 1385568,
"step": 140
},
{
"epoch": 0.6823529411764706,
"grad_norm": 0.9884141683578491,
"learning_rate": 3.690725811496092e-05,
"loss": 0.3741,
"num_input_tokens_seen": 1434384,
"step": 145
},
{
"epoch": 0.7058823529411765,
"grad_norm": 1.116599678993225,
"learning_rate": 3.608489758327472e-05,
"loss": 0.4087,
"num_input_tokens_seen": 1488448,
"step": 150
},
{
"epoch": 0.7294117647058823,
"grad_norm": 0.968177855014801,
"learning_rate": 3.524732492070915e-05,
"loss": 0.3845,
"num_input_tokens_seen": 1537472,
"step": 155
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.9096731543540955,
"learning_rate": 3.4395689552855955e-05,
"loss": 0.3786,
"num_input_tokens_seen": 1587072,
"step": 160
},
{
"epoch": 0.7764705882352941,
"grad_norm": 1.0453791618347168,
"learning_rate": 3.3531160203970805e-05,
"loss": 0.3605,
"num_input_tokens_seen": 1636624,
"step": 165
},
{
"epoch": 0.8,
"grad_norm": 1.0058122873306274,
"learning_rate": 3.265492329309867e-05,
"loss": 0.3775,
"num_input_tokens_seen": 1687904,
"step": 170
},
{
"epoch": 0.8235294117647058,
"grad_norm": 1.1612142324447632,
"learning_rate": 3.1768181305916066e-05,
"loss": 0.3724,
"num_input_tokens_seen": 1734544,
"step": 175
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.987257182598114,
"learning_rate": 3.0872151144524595e-05,
"loss": 0.3657,
"num_input_tokens_seen": 1781216,
"step": 180
},
{
"epoch": 0.8705882352941177,
"grad_norm": 1.0701205730438232,
"learning_rate": 2.996806245746044e-05,
"loss": 0.3551,
"num_input_tokens_seen": 1830144,
"step": 185
},
{
"epoch": 0.8941176470588236,
"grad_norm": 1.1877062320709229,
"learning_rate": 2.9057155952211502e-05,
"loss": 0.3801,
"num_input_tokens_seen": 1876288,
"step": 190
},
{
"epoch": 0.9176470588235294,
"grad_norm": 1.1159309148788452,
"learning_rate": 2.8140681692558035e-05,
"loss": 0.3589,
"num_input_tokens_seen": 1925792,
"step": 195
},
{
"epoch": 0.9411764705882353,
"grad_norm": 1.0033628940582275,
"learning_rate": 2.7219897383073373e-05,
"loss": 0.3625,
"num_input_tokens_seen": 1974368,
"step": 200
},
{
"epoch": 0.9647058823529412,
"grad_norm": 1.0003060102462769,
"learning_rate": 2.629606664313896e-05,
"loss": 0.3466,
"num_input_tokens_seen": 2022080,
"step": 205
},
{
"epoch": 0.9882352941176471,
"grad_norm": 1.1048098802566528,
"learning_rate": 2.537045727284232e-05,
"loss": 0.3353,
"num_input_tokens_seen": 2069920,
"step": 210
},
{
"epoch": 1.011764705882353,
"grad_norm": 0.930486261844635,
"learning_rate": 2.444433951313772e-05,
"loss": 0.3247,
"num_input_tokens_seen": 2120704,
"step": 215
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.9562831521034241,
"learning_rate": 2.3518984302657146e-05,
"loss": 0.3169,
"num_input_tokens_seen": 2171968,
"step": 220
},
{
"epoch": 1.0588235294117647,
"grad_norm": 1.0542248487472534,
"learning_rate": 2.259566153356389e-05,
"loss": 0.3231,
"num_input_tokens_seen": 2223472,
"step": 225
},
{
"epoch": 1.0823529411764705,
"grad_norm": 1.0505027770996094,
"learning_rate": 2.1675638308842145e-05,
"loss": 0.3326,
"num_input_tokens_seen": 2273296,
"step": 230
},
{
"epoch": 1.1058823529411765,
"grad_norm": 1.1345044374465942,
"learning_rate": 2.0760177203414368e-05,
"loss": 0.3084,
"num_input_tokens_seen": 2323008,
"step": 235
},
{
"epoch": 1.1294117647058823,
"grad_norm": 1.0677368640899658,
"learning_rate": 1.9850534531472546e-05,
"loss": 0.3207,
"num_input_tokens_seen": 2373744,
"step": 240
},
{
"epoch": 1.1529411764705881,
"grad_norm": 0.9943181276321411,
"learning_rate": 1.8947958622401328e-05,
"loss": 0.3446,
"num_input_tokens_seen": 2425216,
"step": 245
},
{
"epoch": 1.1764705882352942,
"grad_norm": 1.0221079587936401,
"learning_rate": 1.8053688107658908e-05,
"loss": 0.3066,
"num_input_tokens_seen": 2472864,
"step": 250
},
{
"epoch": 1.2,
"grad_norm": 1.1166061162948608,
"learning_rate": 1.7168950220966614e-05,
"loss": 0.3286,
"num_input_tokens_seen": 2523344,
"step": 255
},
{
"epoch": 1.223529411764706,
"grad_norm": 1.1198899745941162,
"learning_rate": 1.6294959114140034e-05,
"loss": 0.2845,
"num_input_tokens_seen": 2571936,
"step": 260
},
{
"epoch": 1.2470588235294118,
"grad_norm": 1.1331160068511963,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.3295,
"num_input_tokens_seen": 2622560,
"step": 265
},
{
"epoch": 1.2705882352941176,
"grad_norm": 1.177079200744629,
"learning_rate": 1.4583998460759424e-05,
"loss": 0.2841,
"num_input_tokens_seen": 2669232,
"step": 270
},
{
"epoch": 1.2941176470588236,
"grad_norm": 1.245535969734192,
"learning_rate": 1.3749376915816886e-05,
"loss": 0.3103,
"num_input_tokens_seen": 2718128,
"step": 275
},
{
"epoch": 1.3176470588235294,
"grad_norm": 1.065374732017517,
"learning_rate": 1.2930194931731382e-05,
"loss": 0.313,
"num_input_tokens_seen": 2766432,
"step": 280
},
{
"epoch": 1.3411764705882354,
"grad_norm": 1.0573943853378296,
"learning_rate": 1.2127576696025828e-05,
"loss": 0.3335,
"num_input_tokens_seen": 2819760,
"step": 285
},
{
"epoch": 1.3647058823529412,
"grad_norm": 1.1788841485977173,
"learning_rate": 1.1342623665304209e-05,
"loss": 0.3179,
"num_input_tokens_seen": 2867920,
"step": 290
},
{
"epoch": 1.388235294117647,
"grad_norm": 1.1774731874465942,
"learning_rate": 1.0576413053690327e-05,
"loss": 0.3095,
"num_input_tokens_seen": 2914944,
"step": 295
},
{
"epoch": 1.4117647058823528,
"grad_norm": 1.0766475200653076,
"learning_rate": 9.829996354535172e-06,
"loss": 0.3081,
"num_input_tokens_seen": 2965600,
"step": 300
},
{
"epoch": 1.4352941176470588,
"grad_norm": 1.1550407409667969,
"learning_rate": 9.104397897421623e-06,
"loss": 0.3307,
"num_input_tokens_seen": 3015728,
"step": 305
},
{
"epoch": 1.4588235294117646,
"grad_norm": 1.1318764686584473,
"learning_rate": 8.400613442446948e-06,
"loss": 0.3112,
"num_input_tokens_seen": 3065328,
"step": 310
},
{
"epoch": 1.4823529411764707,
"grad_norm": 1.028689980506897,
"learning_rate": 7.719608813711848e-06,
"loss": 0.3028,
"num_input_tokens_seen": 3114720,
"step": 315
},
{
"epoch": 1.5058823529411764,
"grad_norm": 1.1691759824752808,
"learning_rate": 7.062318573891716e-06,
"loss": 0.3274,
"num_input_tokens_seen": 3163008,
"step": 320
},
{
"epoch": 1.5294117647058822,
"grad_norm": 1.2019788026809692,
"learning_rate": 6.429644741708779e-06,
"loss": 0.3079,
"num_input_tokens_seen": 3211408,
"step": 325
},
{
"epoch": 1.5529411764705883,
"grad_norm": 1.123449683189392,
"learning_rate": 5.822455554065217e-06,
"loss": 0.3215,
"num_input_tokens_seen": 3263808,
"step": 330
},
{
"epoch": 1.576470588235294,
"grad_norm": 1.2180700302124023,
"learning_rate": 5.241584274536259e-06,
"loss": 0.2914,
"num_input_tokens_seen": 3311056,
"step": 335
},
{
"epoch": 1.6,
"grad_norm": 1.220276951789856,
"learning_rate": 4.687828049857967e-06,
"loss": 0.3065,
"num_input_tokens_seen": 3358224,
"step": 340
},
{
"epoch": 1.6235294117647059,
"grad_norm": 1.2603631019592285,
"learning_rate": 4.161946815979403e-06,
"loss": 0.3087,
"num_input_tokens_seen": 3409024,
"step": 345
},
{
"epoch": 1.6470588235294117,
"grad_norm": 1.0871949195861816,
"learning_rate": 3.6646622551801345e-06,
"loss": 0.2662,
"num_input_tokens_seen": 3457616,
"step": 350
},
{
"epoch": 1.6705882352941175,
"grad_norm": 1.0722944736480713,
"learning_rate": 3.19665680568445e-06,
"loss": 0.3142,
"num_input_tokens_seen": 3510768,
"step": 355
},
{
"epoch": 1.6941176470588235,
"grad_norm": 1.1241310834884644,
"learning_rate": 2.75857272513132e-06,
"loss": 0.3126,
"num_input_tokens_seen": 3556912,
"step": 360
},
{
"epoch": 1.7176470588235295,
"grad_norm": 1.0836745500564575,
"learning_rate": 2.351011209185336e-06,
"loss": 0.2863,
"num_input_tokens_seen": 3606016,
"step": 365
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.991625964641571,
"learning_rate": 1.9745315664982276e-06,
"loss": 0.3068,
"num_input_tokens_seen": 3654256,
"step": 370
},
{
"epoch": 1.7647058823529411,
"grad_norm": 1.1158082485198975,
"learning_rate": 1.6296504511531836e-06,
"loss": 0.3021,
"num_input_tokens_seen": 3704528,
"step": 375
},
{
"epoch": 1.788235294117647,
"grad_norm": 1.1175050735473633,
"learning_rate": 1.3168411536452152e-06,
"loss": 0.2833,
"num_input_tokens_seen": 3752208,
"step": 380
},
{
"epoch": 1.811764705882353,
"grad_norm": 1.0604891777038574,
"learning_rate": 1.036532951370736e-06,
"loss": 0.3073,
"num_input_tokens_seen": 3805232,
"step": 385
},
{
"epoch": 1.835294117647059,
"grad_norm": 1.1914643049240112,
"learning_rate": 7.891105195175358e-07,
"loss": 0.3044,
"num_input_tokens_seen": 3854864,
"step": 390
},
{
"epoch": 1.8588235294117648,
"grad_norm": 1.1603955030441284,
"learning_rate": 5.749134031637349e-07,
"loss": 0.2991,
"num_input_tokens_seen": 3904880,
"step": 395
},
{
"epoch": 1.8823529411764706,
"grad_norm": 1.1468857526779175,
"learning_rate": 3.9423555131007925e-07,
"loss": 0.3107,
"num_input_tokens_seen": 3953728,
"step": 400
},
{
"epoch": 1.9058823529411764,
"grad_norm": 1.193662405014038,
"learning_rate": 2.473249134850808e-07,
"loss": 0.2987,
"num_input_tokens_seen": 4002944,
"step": 405
},
{
"epoch": 1.9294117647058824,
"grad_norm": 1.2122139930725098,
"learning_rate": 1.343830994765982e-07,
"loss": 0.2877,
"num_input_tokens_seen": 4050880,
"step": 410
},
{
"epoch": 1.9529411764705882,
"grad_norm": 1.1407126188278198,
"learning_rate": 5.5565102656787714e-08,
"loss": 0.2891,
"num_input_tokens_seen": 4098064,
"step": 415
},
{
"epoch": 1.9764705882352942,
"grad_norm": 1.1497220993041992,
"learning_rate": 1.0979087280141298e-08,
"loss": 0.327,
"num_input_tokens_seen": 4147296,
"step": 420
},
{
"epoch": 1.9952941176470587,
"num_input_tokens_seen": 4186528,
"step": 424,
"total_flos": 1.9533295226486784e+17,
"train_loss": 0.3890873733556496,
"train_runtime": 4977.815,
"train_samples_per_second": 1.366,
"train_steps_per_second": 0.085
}
],
"logging_steps": 5,
"max_steps": 424,
"num_input_tokens_seen": 4186528,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.9533295226486784e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}