{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9981298423724285,
"eval_steps": 200,
"global_step": 467,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0021373230029388193,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.7276527881622314,
"logps": -123.19757843017578,
"loss": 1.0,
"step": 1
},
{
"epoch": 0.010686615014694095,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.8715224266052246,
"logps": -234.59034729003906,
"loss": 1.0,
"step": 5
},
{
"epoch": 0.02137323002938819,
"grad_norm": 48.2801432920563,
"learning_rate": 5.3191489361702123e-08,
"logits": -2.8460676670074463,
"logps": -248.16787719726562,
"loss": 0.9998,
"step": 10
},
{
"epoch": 0.03205984504408229,
"grad_norm": 50.29241141091707,
"learning_rate": 9.574468085106382e-08,
"logits": -2.7791006565093994,
"logps": -229.27816772460938,
"loss": 0.9874,
"step": 15
},
{
"epoch": 0.04274646005877638,
"grad_norm": 46.57864781408822,
"learning_rate": 1.3829787234042553e-07,
"logits": -2.7698864936828613,
"logps": -204.43753051757812,
"loss": 0.9444,
"step": 20
},
{
"epoch": 0.053433075073470476,
"grad_norm": 36.984896584442474,
"learning_rate": 1.9148936170212765e-07,
"logits": -2.9412951469421387,
"logps": -291.7972717285156,
"loss": 0.795,
"step": 25
},
{
"epoch": 0.06411969008816458,
"grad_norm": 25.64585807242414,
"learning_rate": 2.4468085106382976e-07,
"logits": -2.913182258605957,
"logps": -280.78009033203125,
"loss": 0.6761,
"step": 30
},
{
"epoch": 0.07480630510285867,
"grad_norm": 22.189719283428744,
"learning_rate": 2.978723404255319e-07,
"logits": -2.8854992389678955,
"logps": -233.5765380859375,
"loss": 0.6236,
"step": 35
},
{
"epoch": 0.08549292011755276,
"grad_norm": 22.990475128782094,
"learning_rate": 3.5106382978723405e-07,
"logits": -2.8781936168670654,
"logps": -233.80117797851562,
"loss": 0.5835,
"step": 40
},
{
"epoch": 0.09617953513224686,
"grad_norm": 21.917107832657997,
"learning_rate": 4.0425531914893614e-07,
"logits": -2.7908034324645996,
"logps": -257.6076965332031,
"loss": 0.5822,
"step": 45
},
{
"epoch": 0.10686615014694095,
"grad_norm": 22.029695238746953,
"learning_rate": 4.574468085106383e-07,
"logits": -2.614025592803955,
"logps": -270.81671142578125,
"loss": 0.5573,
"step": 50
},
{
"epoch": 0.11755276516163506,
"grad_norm": 21.89479772573872,
"learning_rate": 4.999930062653174e-07,
"logits": -2.7972469329833984,
"logps": -286.5457458496094,
"loss": 0.5318,
"step": 55
},
{
"epoch": 0.12823938017632916,
"grad_norm": 21.44902343753467,
"learning_rate": 4.997482666353286e-07,
"logits": -2.690673828125,
"logps": -253.75869750976562,
"loss": 0.5219,
"step": 60
},
{
"epoch": 0.13892599519102325,
"grad_norm": 21.630693894892058,
"learning_rate": 4.991542314714122e-07,
"logits": -2.73010516166687,
"logps": -293.25762939453125,
"loss": 0.5026,
"step": 65
},
{
"epoch": 0.14961261020571734,
"grad_norm": 25.306412397060555,
"learning_rate": 4.982117315854593e-07,
"logits": -2.5588488578796387,
"logps": -263.66851806640625,
"loss": 0.5258,
"step": 70
},
{
"epoch": 0.16029922522041143,
"grad_norm": 22.279892008137498,
"learning_rate": 4.969220851487844e-07,
"logits": -2.401634693145752,
"logps": -262.5799255371094,
"loss": 0.4803,
"step": 75
},
{
"epoch": 0.17098584023510552,
"grad_norm": 23.85473503890347,
"learning_rate": 4.952870958485431e-07,
"logits": -2.7048754692077637,
"logps": -305.823486328125,
"loss": 0.4825,
"step": 80
},
{
"epoch": 0.18167245524979964,
"grad_norm": 26.91529025786325,
"learning_rate": 4.933090503651128e-07,
"logits": -2.3958709239959717,
"logps": -296.0675964355469,
"loss": 0.4779,
"step": 85
},
{
"epoch": 0.19235907026449373,
"grad_norm": 23.370663292540115,
"learning_rate": 4.909907151739633e-07,
"logits": -2.552422046661377,
"logps": -235.0645751953125,
"loss": 0.479,
"step": 90
},
{
"epoch": 0.20304568527918782,
"grad_norm": 24.61367431074327,
"learning_rate": 4.883353326764906e-07,
"logits": -2.3946549892425537,
"logps": -295.76959228515625,
"loss": 0.4708,
"step": 95
},
{
"epoch": 0.2137323002938819,
"grad_norm": 25.53290336307927,
"learning_rate": 4.853466166652258e-07,
"logits": -2.442629098892212,
"logps": -241.5957794189453,
"loss": 0.4609,
"step": 100
},
{
"epoch": 0.224418915308576,
"grad_norm": 22.113276785856915,
"learning_rate": 4.820287471297597e-07,
"logits": -2.474640369415283,
"logps": -290.6409912109375,
"loss": 0.4479,
"step": 105
},
{
"epoch": 0.2351055303232701,
"grad_norm": 21.079178522203513,
"learning_rate": 4.783863644106502e-07,
"logits": -2.515345335006714,
"logps": -306.5741882324219,
"loss": 0.4529,
"step": 110
},
{
"epoch": 0.2457921453379642,
"grad_norm": 21.06551183306965,
"learning_rate": 4.752422169756047e-07,
"logits": -2.4756886959075928,
"logps": -281.9891662597656,
"loss": 0.4696,
"step": 115
},
{
"epoch": 0.2564787603526583,
"grad_norm": 22.63241646221903,
"learning_rate": 4.710288483761524e-07,
"logits": -2.4553544521331787,
"logps": -262.58184814453125,
"loss": 0.4712,
"step": 120
},
{
"epoch": 0.2671653753673524,
"grad_norm": 24.782221235525792,
"learning_rate": 4.6650635094610966e-07,
"logits": -2.3455586433410645,
"logps": -286.12481689453125,
"loss": 0.439,
"step": 125
},
{
"epoch": 0.2778519903820465,
"grad_norm": 22.240996330864814,
"learning_rate": 4.6168104980707103e-07,
"logits": -2.454857349395752,
"logps": -278.26177978515625,
"loss": 0.4606,
"step": 130
},
{
"epoch": 0.2885386053967406,
"grad_norm": 23.029344848112892,
"learning_rate": 4.565596935789987e-07,
"logits": -2.5119175910949707,
"logps": -271.8935546875,
"loss": 0.4583,
"step": 135
},
{
"epoch": 0.2992252204114347,
"grad_norm": 23.114231775090644,
"learning_rate": 4.511494449416671e-07,
"logits": -2.4763400554656982,
"logps": -305.2304992675781,
"loss": 0.4444,
"step": 140
},
{
"epoch": 0.30991183542612877,
"grad_norm": 22.307052406807973,
"learning_rate": 4.4545787061700746e-07,
"logits": -2.279383659362793,
"logps": -314.3868103027344,
"loss": 0.4433,
"step": 145
},
{
"epoch": 0.32059845044082286,
"grad_norm": 22.621940716026984,
"learning_rate": 4.394929307863632e-07,
"logits": -2.4016177654266357,
"logps": -280.4060363769531,
"loss": 0.4603,
"step": 150
},
{
"epoch": 0.33128506545551695,
"grad_norm": 27.05574310923442,
"learning_rate": 4.332629679574565e-07,
"logits": -2.4656858444213867,
"logps": -310.09173583984375,
"loss": 0.4505,
"step": 155
},
{
"epoch": 0.34197168047021104,
"grad_norm": 23.53583613618808,
"learning_rate": 4.2677669529663686e-07,
"logits": -2.337423801422119,
"logps": -317.040771484375,
"loss": 0.4395,
"step": 160
},
{
"epoch": 0.3526582954849052,
"grad_norm": 23.875484303552483,
"learning_rate": 4.200431844427298e-07,
"logits": -2.5105397701263428,
"logps": -305.39630126953125,
"loss": 0.4456,
"step": 165
},
{
"epoch": 0.36334491049959927,
"grad_norm": 22.450514799974165,
"learning_rate": 4.130718528195303e-07,
"logits": -2.3933465480804443,
"logps": -275.6124267578125,
"loss": 0.443,
"step": 170
},
{
"epoch": 0.37403152551429336,
"grad_norm": 23.74049455633747,
"learning_rate": 4.058724504646834e-07,
"logits": -2.3835606575012207,
"logps": -277.36956787109375,
"loss": 0.4339,
"step": 175
},
{
"epoch": 0.38471814052898745,
"grad_norm": 23.64677950778481,
"learning_rate": 3.9845504639337535e-07,
"logits": -2.4487411975860596,
"logps": -290.956298828125,
"loss": 0.4573,
"step": 180
},
{
"epoch": 0.39540475554368154,
"grad_norm": 21.598937457276417,
"learning_rate": 3.908300145159055e-07,
"logits": -2.413297176361084,
"logps": -235.90109252929688,
"loss": 0.4279,
"step": 185
},
{
"epoch": 0.40609137055837563,
"grad_norm": 23.712081510750036,
"learning_rate": 3.8300801912883414e-07,
"logits": -2.4640491008758545,
"logps": -287.1435546875,
"loss": 0.4354,
"step": 190
},
{
"epoch": 0.4167779855730697,
"grad_norm": 44.372947453047885,
"learning_rate": 3.75e-07,
"logits": -2.3341147899627686,
"logps": -343.4080505371094,
"loss": 0.4329,
"step": 195
},
{
"epoch": 0.4274646005877638,
"grad_norm": 23.012230002196524,
"learning_rate": 3.668171570682655e-07,
"logits": -2.340860366821289,
"logps": -262.5423889160156,
"loss": 0.431,
"step": 200
},
{
"epoch": 0.4274646005877638,
"eval_logits": -2.3263635635375977,
"eval_logps": -301.7860412597656,
"eval_loss": 0.4314996898174286,
"eval_runtime": 507.0323,
"eval_samples_per_second": 3.881,
"eval_steps_per_second": 0.243,
"step": 200
},
{
"epoch": 0.4381512156024579,
"grad_norm": 22.412220925103565,
"learning_rate": 3.584709347793895e-07,
"logits": -2.2411904335021973,
"logps": -277.8357849121094,
"loss": 0.4425,
"step": 205
},
{
"epoch": 0.448837830617152,
"grad_norm": 23.021158257266347,
"learning_rate": 3.499730060799352e-07,
"logits": -2.1322011947631836,
"logps": -289.6743469238281,
"loss": 0.4292,
"step": 210
},
{
"epoch": 0.45952444563184613,
"grad_norm": 23.724850433619796,
"learning_rate": 3.413352560915988e-07,
"logits": -2.0033411979675293,
"logps": -272.5525207519531,
"loss": 0.4121,
"step": 215
},
{
"epoch": 0.4702110606465402,
"grad_norm": 21.651299269242514,
"learning_rate": 3.325697654887918e-07,
"logits": -2.051339626312256,
"logps": -235.84262084960938,
"loss": 0.4222,
"step": 220
},
{
"epoch": 0.4808976756612343,
"grad_norm": 25.440752125826293,
"learning_rate": 3.2368879360272606e-07,
"logits": -2.34849214553833,
"logps": -312.7589111328125,
"loss": 0.424,
"step": 225
},
{
"epoch": 0.4915842906759284,
"grad_norm": 21.05394354899474,
"learning_rate": 3.147047612756302e-07,
"logits": -2.335400104522705,
"logps": -325.1053161621094,
"loss": 0.4261,
"step": 230
},
{
"epoch": 0.5022709056906225,
"grad_norm": 22.293568633261653,
"learning_rate": 3.056302334890786e-07,
"logits": -2.2490334510803223,
"logps": -266.56219482421875,
"loss": 0.4322,
"step": 235
},
{
"epoch": 0.5129575207053166,
"grad_norm": 22.256364087698653,
"learning_rate": 2.964779017907287e-07,
"logits": -2.16744065284729,
"logps": -309.4420471191406,
"loss": 0.4222,
"step": 240
},
{
"epoch": 0.5236441357200107,
"grad_norm": 25.954148860680444,
"learning_rate": 2.872605665440436e-07,
"logits": -2.451477527618408,
"logps": -282.71319580078125,
"loss": 0.4096,
"step": 245
},
{
"epoch": 0.5343307507347048,
"grad_norm": 21.39918862577877,
"learning_rate": 2.7799111902582693e-07,
"logits": -2.6052417755126953,
"logps": -273.9092102050781,
"loss": 0.4155,
"step": 250
},
{
"epoch": 0.5450173657493989,
"grad_norm": 24.967131244901736,
"learning_rate": 2.6868252339660607e-07,
"logits": -2.225090503692627,
"logps": -280.70782470703125,
"loss": 0.416,
"step": 255
},
{
"epoch": 0.555703980764093,
"grad_norm": 24.077110482464793,
"learning_rate": 2.593477985690815e-07,
"logits": -2.2254879474639893,
"logps": -286.8822937011719,
"loss": 0.4099,
"step": 260
},
{
"epoch": 0.566390595778787,
"grad_norm": 22.910756064363806,
"learning_rate": 2.5e-07,
"logits": -2.3557419776916504,
"logps": -280.0397033691406,
"loss": 0.4125,
"step": 265
},
{
"epoch": 0.5770772107934812,
"grad_norm": 27.974836628803857,
"learning_rate": 2.406522014309186e-07,
"logits": -2.041544198989868,
"logps": -273.45318603515625,
"loss": 0.4291,
"step": 270
},
{
"epoch": 0.5877638258081752,
"grad_norm": 22.765553156216026,
"learning_rate": 2.3131747660339394e-07,
"logits": -2.3228344917297363,
"logps": -282.9244079589844,
"loss": 0.4141,
"step": 275
},
{
"epoch": 0.5984504408228694,
"grad_norm": 22.498986802146685,
"learning_rate": 2.2200888097417302e-07,
"logits": -1.89029061794281,
"logps": -315.2174072265625,
"loss": 0.42,
"step": 280
},
{
"epoch": 0.6091370558375635,
"grad_norm": 23.58891369147268,
"learning_rate": 2.1273943345595635e-07,
"logits": -1.903066873550415,
"logps": -261.4866638183594,
"loss": 0.4062,
"step": 285
},
{
"epoch": 0.6198236708522575,
"grad_norm": 21.26751900271618,
"learning_rate": 2.0352209820927135e-07,
"logits": -2.048973560333252,
"logps": -312.31048583984375,
"loss": 0.4142,
"step": 290
},
{
"epoch": 0.6305102858669517,
"grad_norm": 22.071346201309105,
"learning_rate": 1.9436976651092142e-07,
"logits": -1.7636706829071045,
"logps": -257.8403625488281,
"loss": 0.4124,
"step": 295
},
{
"epoch": 0.6411969008816457,
"grad_norm": 21.12974848393809,
"learning_rate": 1.8529523872436977e-07,
"logits": -2.014808177947998,
"logps": -279.1208190917969,
"loss": 0.4112,
"step": 300
},
{
"epoch": 0.6518835158963399,
"grad_norm": 23.594905022803008,
"learning_rate": 1.763112063972739e-07,
"logits": -2.050807237625122,
"logps": -296.3996887207031,
"loss": 0.4112,
"step": 305
},
{
"epoch": 0.6625701309110339,
"grad_norm": 21.591511525006933,
"learning_rate": 1.674302345112083e-07,
"logits": -1.986161470413208,
"logps": -275.21893310546875,
"loss": 0.419,
"step": 310
},
{
"epoch": 0.673256745925728,
"grad_norm": 20.6885856376684,
"learning_rate": 1.5866474390840124e-07,
"logits": -2.2522599697113037,
"logps": -319.27911376953125,
"loss": 0.4106,
"step": 315
},
{
"epoch": 0.6839433609404221,
"grad_norm": 22.909433086566732,
"learning_rate": 1.500269939200648e-07,
"logits": -2.207468032836914,
"logps": -266.585205078125,
"loss": 0.3909,
"step": 320
},
{
"epoch": 0.6946299759551162,
"grad_norm": 22.282735602270293,
"learning_rate": 1.4152906522061047e-07,
"logits": -2.35794997215271,
"logps": -316.83807373046875,
"loss": 0.4048,
"step": 325
},
{
"epoch": 0.7053165909698104,
"grad_norm": 23.16656248515272,
"learning_rate": 1.3318284293173449e-07,
"logits": -2.242034912109375,
"logps": -307.3307189941406,
"loss": 0.4042,
"step": 330
},
{
"epoch": 0.7160032059845044,
"grad_norm": 21.510083315851972,
"learning_rate": 1.2500000000000005e-07,
"logits": -1.9815715551376343,
"logps": -338.7240905761719,
"loss": 0.3934,
"step": 335
},
{
"epoch": 0.7266898209991985,
"grad_norm": 21.492689286630217,
"learning_rate": 1.1699198087116588e-07,
"logits": -2.1296348571777344,
"logps": -271.06878662109375,
"loss": 0.4011,
"step": 340
},
{
"epoch": 0.7373764360138926,
"grad_norm": 23.01568750995505,
"learning_rate": 1.0916998548409447e-07,
"logits": -2.244135618209839,
"logps": -310.14501953125,
"loss": 0.412,
"step": 345
},
{
"epoch": 0.7480630510285867,
"grad_norm": 22.53687051047034,
"learning_rate": 1.0154495360662463e-07,
"logits": -2.1778364181518555,
"logps": -269.3367614746094,
"loss": 0.3901,
"step": 350
},
{
"epoch": 0.7587496660432808,
"grad_norm": 23.184829487659847,
"learning_rate": 9.412754953531663e-08,
"logits": -2.286181926727295,
"logps": -271.3564453125,
"loss": 0.4172,
"step": 355
},
{
"epoch": 0.7694362810579749,
"grad_norm": 21.040610617000496,
"learning_rate": 8.692814718046978e-08,
"logits": -2.055994987487793,
"logps": -304.63885498046875,
"loss": 0.4083,
"step": 360
},
{
"epoch": 0.7801228960726689,
"grad_norm": 23.09698562414689,
"learning_rate": 7.99568155572701e-08,
"logits": -2.213494062423706,
"logps": -277.4697570800781,
"loss": 0.4155,
"step": 365
},
{
"epoch": 0.7908095110873631,
"grad_norm": 21.236358981933506,
"learning_rate": 7.322330470336313e-08,
"logits": -2.0819058418273926,
"logps": -281.2724914550781,
"loss": 0.4037,
"step": 370
},
{
"epoch": 0.8014961261020572,
"grad_norm": 26.06067361683024,
"learning_rate": 6.673703204254347e-08,
"logits": -2.030791997909546,
"logps": -284.4011535644531,
"loss": 0.4121,
"step": 375
},
{
"epoch": 0.8121827411167513,
"grad_norm": 20.224636468750884,
"learning_rate": 6.050706921363672e-08,
"logits": -1.9256960153579712,
"logps": -294.9930725097656,
"loss": 0.4027,
"step": 380
},
{
"epoch": 0.8228693561314454,
"grad_norm": 24.14425420434055,
"learning_rate": 5.454212938299255e-08,
"logits": -1.7693021297454834,
"logps": -286.45452880859375,
"loss": 0.4136,
"step": 385
},
{
"epoch": 0.8335559711461394,
"grad_norm": 22.762479440937984,
"learning_rate": 4.885055505833291e-08,
"logits": -1.9304530620574951,
"logps": -291.02880859375,
"loss": 0.3988,
"step": 390
},
{
"epoch": 0.8442425861608336,
"grad_norm": 21.312231537009946,
"learning_rate": 4.3440306421001324e-08,
"logits": -2.025493860244751,
"logps": -268.2240905761719,
"loss": 0.4083,
"step": 395
},
{
"epoch": 0.8549292011755276,
"grad_norm": 23.03482969018928,
"learning_rate": 3.831895019292897e-08,
"logits": -2.1968250274658203,
"logps": -294.18292236328125,
"loss": 0.4118,
"step": 400
},
{
"epoch": 0.8549292011755276,
"eval_logits": -2.2095947265625,
"eval_logps": -310.0013122558594,
"eval_loss": 0.40863949060440063,
"eval_runtime": 537.6528,
"eval_samples_per_second": 3.66,
"eval_steps_per_second": 0.229,
"step": 400
},
{
"epoch": 0.8656158161902218,
"grad_norm": 21.5208583380133,
"learning_rate": 3.349364905389032e-08,
"logits": -2.188328981399536,
"logps": -306.3011169433594,
"loss": 0.4113,
"step": 405
},
{
"epoch": 0.8763024312049158,
"grad_norm": 20.54362762134308,
"learning_rate": 2.8971151623847584e-08,
"logits": -1.99441659450531,
"logps": -286.16876220703125,
"loss": 0.4171,
"step": 410
},
{
"epoch": 0.88698904621961,
"grad_norm": 22.86656655422917,
"learning_rate": 2.475778302439524e-08,
"logits": -2.1187539100646973,
"logps": -266.64959716796875,
"loss": 0.4053,
"step": 415
},
{
"epoch": 0.897675661234304,
"grad_norm": 22.18589761074244,
"learning_rate": 2.085943603250595e-08,
"logits": -2.3276543617248535,
"logps": -284.54974365234375,
"loss": 0.4153,
"step": 420
},
{
"epoch": 0.9083622762489981,
"grad_norm": 20.63324288656286,
"learning_rate": 1.7281562838948966e-08,
"logits": -2.1507515907287598,
"logps": -281.44158935546875,
"loss": 0.4074,
"step": 425
},
{
"epoch": 0.9190488912636923,
"grad_norm": 20.358726620730323,
"learning_rate": 1.4029167422908105e-08,
"logits": -2.1837050914764404,
"logps": -310.3664245605469,
"loss": 0.3928,
"step": 430
},
{
"epoch": 0.9297355062783863,
"grad_norm": 21.126634047747054,
"learning_rate": 1.1106798553464802e-08,
"logits": -2.260655164718628,
"logps": -281.1075439453125,
"loss": 0.413,
"step": 435
},
{
"epoch": 0.9404221212930804,
"grad_norm": 23.274529002886094,
"learning_rate": 8.518543427732949e-09,
"logits": -2.16852068901062,
"logps": -262.402099609375,
"loss": 0.3922,
"step": 440
},
{
"epoch": 0.9511087363077745,
"grad_norm": 26.435895904221407,
"learning_rate": 6.268021954544095e-09,
"logits": -2.1322624683380127,
"logps": -324.4895324707031,
"loss": 0.4172,
"step": 445
},
{
"epoch": 0.9617953513224686,
"grad_norm": 24.896799022635523,
"learning_rate": 4.358381691677931e-09,
"logits": -2.1141066551208496,
"logps": -289.3265380859375,
"loss": 0.4068,
"step": 450
},
{
"epoch": 0.9724819663371627,
"grad_norm": 21.21043979653372,
"learning_rate": 2.7922934437178692e-09,
"logits": -1.9867866039276123,
"logps": -283.8013000488281,
"loss": 0.391,
"step": 455
},
{
"epoch": 0.9831685813518568,
"grad_norm": 21.01754870372027,
"learning_rate": 1.5719475266893489e-09,
"logits": -2.012530565261841,
"logps": -245.16806030273438,
"loss": 0.4041,
"step": 460
},
{
"epoch": 0.9938551963665508,
"grad_norm": 22.43911437197679,
"learning_rate": 6.990507047049676e-10,
"logits": -2.1750540733337402,
"logps": -284.3227233886719,
"loss": 0.4094,
"step": 465
},
{
"epoch": 0.9981298423724285,
"step": 467,
"total_flos": 0.0,
"train_loss": 0.4667486662017201,
"train_runtime": 41187.6602,
"train_samples_per_second": 1.454,
"train_steps_per_second": 0.011
}
],
"logging_steps": 5,
"max_steps": 467,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}