{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 819,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001221001221001221,
"grad_norm": 1.629501933145264,
"learning_rate": 1.2195121951219513e-05,
"loss": 1.3788,
"step": 1
},
{
"epoch": 0.006105006105006105,
"grad_norm": 1.5408249987691998,
"learning_rate": 6.097560975609756e-05,
"loss": 1.3922,
"step": 5
},
{
"epoch": 0.01221001221001221,
"grad_norm": 1.6964330573646578,
"learning_rate": 0.00012195121951219512,
"loss": 1.3357,
"step": 10
},
{
"epoch": 0.018315018315018316,
"grad_norm": 0.5877948098047541,
"learning_rate": 0.00018292682926829268,
"loss": 1.2292,
"step": 15
},
{
"epoch": 0.02442002442002442,
"grad_norm": 0.41277023465903623,
"learning_rate": 0.00024390243902439024,
"loss": 1.1569,
"step": 20
},
{
"epoch": 0.030525030525030524,
"grad_norm": 0.28894210212831173,
"learning_rate": 0.0003048780487804878,
"loss": 1.1017,
"step": 25
},
{
"epoch": 0.03663003663003663,
"grad_norm": 0.15704693527032987,
"learning_rate": 0.00036585365853658537,
"loss": 1.0689,
"step": 30
},
{
"epoch": 0.042735042735042736,
"grad_norm": 0.149497877408929,
"learning_rate": 0.0004268292682926829,
"loss": 1.0597,
"step": 35
},
{
"epoch": 0.04884004884004884,
"grad_norm": 0.17229438336995273,
"learning_rate": 0.0004878048780487805,
"loss": 1.0497,
"step": 40
},
{
"epoch": 0.054945054945054944,
"grad_norm": 0.1350927997659262,
"learning_rate": 0.0005487804878048781,
"loss": 1.0345,
"step": 45
},
{
"epoch": 0.06105006105006105,
"grad_norm": 0.1316879612941552,
"learning_rate": 0.0006097560975609756,
"loss": 1.03,
"step": 50
},
{
"epoch": 0.06715506715506715,
"grad_norm": 0.12294647268470198,
"learning_rate": 0.0006707317073170732,
"loss": 1.0182,
"step": 55
},
{
"epoch": 0.07326007326007326,
"grad_norm": 0.13768875749553208,
"learning_rate": 0.0007317073170731707,
"loss": 1.018,
"step": 60
},
{
"epoch": 0.07936507936507936,
"grad_norm": 0.1301026880176555,
"learning_rate": 0.0007926829268292683,
"loss": 1.0148,
"step": 65
},
{
"epoch": 0.08547008547008547,
"grad_norm": 0.15052001326359865,
"learning_rate": 0.0008536585365853659,
"loss": 1.0074,
"step": 70
},
{
"epoch": 0.09157509157509157,
"grad_norm": 0.15206771365197816,
"learning_rate": 0.0009146341463414635,
"loss": 1.0115,
"step": 75
},
{
"epoch": 0.09768009768009768,
"grad_norm": 0.12918729689877004,
"learning_rate": 0.000975609756097561,
"loss": 1.0034,
"step": 80
},
{
"epoch": 0.10378510378510379,
"grad_norm": 0.1617114692792796,
"learning_rate": 0.000999959117130623,
"loss": 1.0057,
"step": 85
},
{
"epoch": 0.10989010989010989,
"grad_norm": 0.15209285146918505,
"learning_rate": 0.000999709301584265,
"loss": 0.9982,
"step": 90
},
{
"epoch": 0.115995115995116,
"grad_norm": 0.15560054183114133,
"learning_rate": 0.0009992324965361792,
"loss": 0.9988,
"step": 95
},
{
"epoch": 0.1221001221001221,
"grad_norm": 0.12326520201606891,
"learning_rate": 0.0009985289185717684,
"loss": 0.9868,
"step": 100
},
{
"epoch": 0.1282051282051282,
"grad_norm": 0.13506004498847854,
"learning_rate": 0.000997598887286467,
"loss": 0.99,
"step": 105
},
{
"epoch": 0.1343101343101343,
"grad_norm": 0.12846122121998493,
"learning_rate": 0.000996442825140569,
"loss": 0.981,
"step": 110
},
{
"epoch": 0.14041514041514042,
"grad_norm": 0.15572445817151304,
"learning_rate": 0.0009950612572673255,
"loss": 0.9849,
"step": 115
},
{
"epoch": 0.14652014652014653,
"grad_norm": 0.14660132838678952,
"learning_rate": 0.0009934548112344088,
"loss": 0.9883,
"step": 120
},
{
"epoch": 0.15262515262515264,
"grad_norm": 0.11705785841462868,
"learning_rate": 0.0009916242167588433,
"loss": 0.9879,
"step": 125
},
{
"epoch": 0.15873015873015872,
"grad_norm": 0.13642851783373758,
"learning_rate": 0.0009895703053755364,
"loss": 0.9798,
"step": 130
},
{
"epoch": 0.16483516483516483,
"grad_norm": 0.12472826943952521,
"learning_rate": 0.0009872940100595598,
"loss": 0.977,
"step": 135
},
{
"epoch": 0.17094017094017094,
"grad_norm": 0.119735339518556,
"learning_rate": 0.0009847963648023522,
"loss": 0.9758,
"step": 140
},
{
"epoch": 0.17704517704517705,
"grad_norm": 0.12634642504923346,
"learning_rate": 0.000982078504142035,
"loss": 0.9637,
"step": 145
},
{
"epoch": 0.18315018315018314,
"grad_norm": 0.1313401316215934,
"learning_rate": 0.000979141662648057,
"loss": 0.9695,
"step": 150
},
{
"epoch": 0.18925518925518925,
"grad_norm": 0.10869766647138489,
"learning_rate": 0.0009759871743604004,
"loss": 0.9657,
"step": 155
},
{
"epoch": 0.19536019536019536,
"grad_norm": 0.11697532103244576,
"learning_rate": 0.0009726164721835996,
"loss": 0.9612,
"step": 160
},
{
"epoch": 0.20146520146520147,
"grad_norm": 0.13503159530521566,
"learning_rate": 0.0009690310872358572,
"loss": 0.9729,
"step": 165
},
{
"epoch": 0.20757020757020758,
"grad_norm": 0.14747150622124625,
"learning_rate": 0.0009652326481535434,
"loss": 0.9611,
"step": 170
},
{
"epoch": 0.21367521367521367,
"grad_norm": 0.15447081723038833,
"learning_rate": 0.0009612228803513976,
"loss": 0.9623,
"step": 175
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.15215752710313343,
"learning_rate": 0.0009570036052387725,
"loss": 0.9573,
"step": 180
},
{
"epoch": 0.2258852258852259,
"grad_norm": 0.13642796511864827,
"learning_rate": 0.0009525767393922706,
"loss": 0.9461,
"step": 185
},
{
"epoch": 0.231990231990232,
"grad_norm": 0.10883830363461129,
"learning_rate": 0.0009479442936851526,
"loss": 0.949,
"step": 190
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.130236635303521,
"learning_rate": 0.0009431083723739124,
"loss": 0.9366,
"step": 195
},
{
"epoch": 0.2442002442002442,
"grad_norm": 0.10775194472971555,
"learning_rate": 0.0009380711721424326,
"loss": 0.9418,
"step": 200
},
{
"epoch": 0.2503052503052503,
"grad_norm": 0.11905614657094514,
"learning_rate": 0.0009328349811041565,
"loss": 0.9428,
"step": 205
},
{
"epoch": 0.2564102564102564,
"grad_norm": 0.11842679938361055,
"learning_rate": 0.0009274021777627277,
"loss": 0.9506,
"step": 210
},
{
"epoch": 0.2625152625152625,
"grad_norm": 0.13876037195742963,
"learning_rate": 0.0009217752299315725,
"loss": 0.9276,
"step": 215
},
{
"epoch": 0.2686202686202686,
"grad_norm": 0.21581360420007165,
"learning_rate": 0.0009159566936129111,
"loss": 0.9512,
"step": 220
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.15887906930846354,
"learning_rate": 0.0009099492118367123,
"loss": 0.9457,
"step": 225
},
{
"epoch": 0.28083028083028083,
"grad_norm": 0.13609412278446711,
"learning_rate": 0.0009037555134601149,
"loss": 0.929,
"step": 230
},
{
"epoch": 0.2869352869352869,
"grad_norm": 0.14025295501919074,
"learning_rate": 0.000897378411927864,
"loss": 0.9388,
"step": 235
},
{
"epoch": 0.29304029304029305,
"grad_norm": 0.10506266135979772,
"learning_rate": 0.0008908208039943213,
"loss": 0.9288,
"step": 240
},
{
"epoch": 0.29914529914529914,
"grad_norm": 0.10813079919343101,
"learning_rate": 0.0008840856684076366,
"loss": 0.9175,
"step": 245
},
{
"epoch": 0.3052503052503053,
"grad_norm": 0.15791808529464318,
"learning_rate": 0.0008771760645566706,
"loss": 0.9246,
"step": 250
},
{
"epoch": 0.31135531135531136,
"grad_norm": 2.0370648707444725,
"learning_rate": 0.000870095131081289,
"loss": 0.9449,
"step": 255
},
{
"epoch": 0.31746031746031744,
"grad_norm": 0.1311182760192241,
"learning_rate": 0.0008628460844466573,
"loss": 0.9253,
"step": 260
},
{
"epoch": 0.3235653235653236,
"grad_norm": 0.14312834396261354,
"learning_rate": 0.0008554322174821833,
"loss": 0.9278,
"step": 265
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.11428839304992285,
"learning_rate": 0.0008478568978857722,
"loss": 0.9283,
"step": 270
},
{
"epoch": 0.33577533577533575,
"grad_norm": 0.10818382932069733,
"learning_rate": 0.0008401235666940728,
"loss": 0.9284,
"step": 275
},
{
"epoch": 0.3418803418803419,
"grad_norm": 0.11567783999993994,
"learning_rate": 0.0008322357367194109,
"loss": 0.9182,
"step": 280
},
{
"epoch": 0.34798534798534797,
"grad_norm": 0.10294827848523892,
"learning_rate": 0.0008241969909541184,
"loss": 0.9139,
"step": 285
},
{
"epoch": 0.3540903540903541,
"grad_norm": 0.13672098058204363,
"learning_rate": 0.0008160109809429835,
"loss": 0.9217,
"step": 290
},
{
"epoch": 0.3601953601953602,
"grad_norm": 0.13807363255445734,
"learning_rate": 0.0008076814251245613,
"loss": 0.9153,
"step": 295
},
{
"epoch": 0.3663003663003663,
"grad_norm": 0.1034627930470886,
"learning_rate": 0.0007992121071421001,
"loss": 0.9287,
"step": 300
},
{
"epoch": 0.3724053724053724,
"grad_norm": 0.1294524989308333,
"learning_rate": 0.0007906068741248461,
"loss": 0.9138,
"step": 305
},
{
"epoch": 0.3785103785103785,
"grad_norm": 0.09977607943662724,
"learning_rate": 0.0007818696349405123,
"loss": 0.9192,
"step": 310
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.12394897831693975,
"learning_rate": 0.0007730043584197021,
"loss": 0.9097,
"step": 315
},
{
"epoch": 0.3907203907203907,
"grad_norm": 0.11903071018799159,
"learning_rate": 0.0007640150715530953,
"loss": 0.9029,
"step": 320
},
{
"epoch": 0.3968253968253968,
"grad_norm": 0.13131459819762914,
"learning_rate": 0.0007549058576622157,
"loss": 0.9163,
"step": 325
},
{
"epoch": 0.40293040293040294,
"grad_norm": 0.10951470254516554,
"learning_rate": 0.0007456808545446102,
"loss": 0.9077,
"step": 330
},
{
"epoch": 0.409035409035409,
"grad_norm": 0.09719747300206659,
"learning_rate": 0.0007363442525942826,
"loss": 0.9041,
"step": 335
},
{
"epoch": 0.41514041514041516,
"grad_norm": 0.11238853337044794,
"learning_rate": 0.0007269002928982366,
"loss": 0.9071,
"step": 340
},
{
"epoch": 0.42124542124542125,
"grad_norm": 0.11751659500081639,
"learning_rate": 0.0007173532653099911,
"loss": 0.8996,
"step": 345
},
{
"epoch": 0.42735042735042733,
"grad_norm": 0.11145184489879807,
"learning_rate": 0.0007077075065009433,
"loss": 0.9195,
"step": 350
},
{
"epoch": 0.43345543345543347,
"grad_norm": 0.12082613464058725,
"learning_rate": 0.0006979673979904665,
"loss": 0.9116,
"step": 355
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.11356706546924504,
"learning_rate": 0.0006881373641556346,
"loss": 0.8957,
"step": 360
},
{
"epoch": 0.4456654456654457,
"grad_norm": 0.10550995720573654,
"learning_rate": 0.0006782218702214797,
"loss": 0.8967,
"step": 365
},
{
"epoch": 0.4517704517704518,
"grad_norm": 0.10623969934852533,
"learning_rate": 0.000668225420232694,
"loss": 0.8956,
"step": 370
},
{
"epoch": 0.45787545787545786,
"grad_norm": 0.09920329673826833,
"learning_rate": 0.0006581525550076989,
"loss": 0.8944,
"step": 375
},
{
"epoch": 0.463980463980464,
"grad_norm": 0.11720468030354501,
"learning_rate": 0.0006480078500760096,
"loss": 0.9046,
"step": 380
},
{
"epoch": 0.4700854700854701,
"grad_norm": 0.11313634978608422,
"learning_rate": 0.0006377959135998322,
"loss": 0.8989,
"step": 385
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.10072650234577825,
"learning_rate": 0.0006275213842808383,
"loss": 0.892,
"step": 390
},
{
"epoch": 0.4822954822954823,
"grad_norm": 0.16427063748319404,
"learning_rate": 0.0006171889292530655,
"loss": 0.8923,
"step": 395
},
{
"epoch": 0.4884004884004884,
"grad_norm": 0.09685334068579918,
"learning_rate": 0.0006068032419629059,
"loss": 0.9026,
"step": 400
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.09871518532748647,
"learning_rate": 0.0005963690400371386,
"loss": 0.8923,
"step": 405
},
{
"epoch": 0.5006105006105006,
"grad_norm": 0.10299185446487583,
"learning_rate": 0.0005858910631399817,
"loss": 0.8934,
"step": 410
},
{
"epoch": 0.5067155067155067,
"grad_norm": 0.12753918898402714,
"learning_rate": 0.0005753740708201315,
"loss": 0.8855,
"step": 415
},
{
"epoch": 0.5128205128205128,
"grad_norm": 0.12750913259702365,
"learning_rate": 0.0005648228403487712,
"loss": 0.8911,
"step": 420
},
{
"epoch": 0.518925518925519,
"grad_norm": 0.1274157898333562,
"learning_rate": 0.0005542421645495279,
"loss": 0.8943,
"step": 425
},
{
"epoch": 0.525030525030525,
"grad_norm": 0.1771370225873098,
"learning_rate": 0.0005436368496213656,
"loss": 0.8823,
"step": 430
},
{
"epoch": 0.5311355311355311,
"grad_norm": 0.11030418227435798,
"learning_rate": 0.0005330117129554028,
"loss": 0.8855,
"step": 435
},
{
"epoch": 0.5372405372405372,
"grad_norm": 0.10914376299241067,
"learning_rate": 0.0005223715809466454,
"loss": 0.8896,
"step": 440
},
{
"epoch": 0.5433455433455433,
"grad_norm": 0.105079916260377,
"learning_rate": 0.0005117212868016303,
"loss": 0.8829,
"step": 445
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.09038201415584558,
"learning_rate": 0.0005010656683429746,
"loss": 0.8875,
"step": 450
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.10067313064197672,
"learning_rate": 0.0004904095658118283,
"loss": 0.8744,
"step": 455
},
{
"epoch": 0.5616605616605617,
"grad_norm": 0.09677024273019037,
"learning_rate": 0.0004797578196692281,
"loss": 0.886,
"step": 460
},
{
"epoch": 0.5677655677655677,
"grad_norm": 0.1010893098260069,
"learning_rate": 0.00046911526839735093,
"loss": 0.8914,
"step": 465
},
{
"epoch": 0.5738705738705738,
"grad_norm": 0.1016579599480523,
"learning_rate": 0.0004584867463016671,
"loss": 0.8821,
"step": 470
},
{
"epoch": 0.57997557997558,
"grad_norm": 0.10945662085952625,
"learning_rate": 0.00044787708131499104,
"loss": 0.8698,
"step": 475
},
{
"epoch": 0.5860805860805861,
"grad_norm": 0.11552520773703621,
"learning_rate": 0.0004372910928044249,
"loss": 0.8761,
"step": 480
},
{
"epoch": 0.5921855921855922,
"grad_norm": 0.10307537673383473,
"learning_rate": 0.00042673358938219544,
"loss": 0.8743,
"step": 485
},
{
"epoch": 0.5982905982905983,
"grad_norm": 0.09087654917065777,
"learning_rate": 0.00041620936672137393,
"loss": 0.8758,
"step": 490
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.09850681791384697,
"learning_rate": 0.00040572320537747656,
"loss": 0.8728,
"step": 495
},
{
"epoch": 0.6105006105006106,
"grad_norm": 0.0992318941745092,
"learning_rate": 0.0003952798686169279,
"loss": 0.8876,
"step": 500
},
{
"epoch": 0.6166056166056166,
"grad_norm": 0.11916562636988841,
"learning_rate": 0.00038488410025338133,
"loss": 0.8799,
"step": 505
},
{
"epoch": 0.6227106227106227,
"grad_norm": 0.1102419615016477,
"learning_rate": 0.00037454062249287477,
"loss": 0.8748,
"step": 510
},
{
"epoch": 0.6288156288156288,
"grad_norm": 0.08376386530120562,
"learning_rate": 0.0003642541337887999,
"loss": 0.8782,
"step": 515
},
{
"epoch": 0.6349206349206349,
"grad_norm": 0.12657317900189496,
"learning_rate": 0.00035402930670766296,
"loss": 0.8634,
"step": 520
},
{
"epoch": 0.6410256410256411,
"grad_norm": 0.08636544191631769,
"learning_rate": 0.00034387078580660346,
"loss": 0.8649,
"step": 525
},
{
"epoch": 0.6471306471306472,
"grad_norm": 0.11450664347824203,
"learning_rate": 0.00033378318552363664,
"loss": 0.8655,
"step": 530
},
{
"epoch": 0.6532356532356532,
"grad_norm": 0.09878839008559168,
"learning_rate": 0.0003237710880815756,
"loss": 0.8751,
"step": 535
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.09052163701573301,
"learning_rate": 0.00031383904140658986,
"loss": 0.8628,
"step": 540
},
{
"epoch": 0.6654456654456654,
"grad_norm": 0.11374009475099364,
"learning_rate": 0.0003039915570623396,
"loss": 0.8702,
"step": 545
},
{
"epoch": 0.6715506715506715,
"grad_norm": 0.09096994547360551,
"learning_rate": 0.0002942331082006308,
"loss": 0.8606,
"step": 550
},
{
"epoch": 0.6776556776556777,
"grad_norm": 0.09829912484792226,
"learning_rate": 0.00028456812752951485,
"loss": 0.8613,
"step": 555
},
{
"epoch": 0.6837606837606838,
"grad_norm": 0.09918852355520438,
"learning_rate": 0.0002750010052997635,
"loss": 0.8651,
"step": 560
},
{
"epoch": 0.6898656898656899,
"grad_norm": 0.08703719318631921,
"learning_rate": 0.00026553608731062604,
"loss": 0.8634,
"step": 565
},
{
"epoch": 0.6959706959706959,
"grad_norm": 0.08958256157163819,
"learning_rate": 0.00025617767293578176,
"loss": 0.8569,
"step": 570
},
{
"epoch": 0.702075702075702,
"grad_norm": 0.08871557325816307,
"learning_rate": 0.0002469300131703773,
"loss": 0.861,
"step": 575
},
{
"epoch": 0.7081807081807082,
"grad_norm": 0.08822413460387163,
"learning_rate": 0.00023779730870004235,
"loss": 0.8494,
"step": 580
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.09103503298034178,
"learning_rate": 0.00022878370799275777,
"loss": 0.8645,
"step": 585
},
{
"epoch": 0.7203907203907204,
"grad_norm": 0.09228123975601896,
"learning_rate": 0.0002198933054144414,
"loss": 0.8477,
"step": 590
},
{
"epoch": 0.7264957264957265,
"grad_norm": 0.11537298722683907,
"learning_rate": 0.00021113013936911113,
"loss": 0.8559,
"step": 595
},
{
"epoch": 0.7326007326007326,
"grad_norm": 0.12133130861088819,
"learning_rate": 0.00020249819046446837,
"loss": 0.855,
"step": 600
},
{
"epoch": 0.7387057387057387,
"grad_norm": 0.08762132815499236,
"learning_rate": 0.00019400137970373356,
"loss": 0.8598,
"step": 605
},
{
"epoch": 0.7448107448107448,
"grad_norm": 0.09591981770315502,
"learning_rate": 0.00018564356670455767,
"loss": 0.8623,
"step": 610
},
{
"epoch": 0.7509157509157509,
"grad_norm": 0.08440244551483579,
"learning_rate": 0.00017742854794581785,
"loss": 0.8629,
"step": 615
},
{
"epoch": 0.757020757020757,
"grad_norm": 0.08667278234179086,
"learning_rate": 0.00016936005504309342,
"loss": 0.8593,
"step": 620
},
{
"epoch": 0.7631257631257631,
"grad_norm": 0.09670164531050161,
"learning_rate": 0.0001614417530536042,
"loss": 0.8577,
"step": 625
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.1264104247325156,
"learning_rate": 0.00015367723881138434,
"loss": 0.8617,
"step": 630
},
{
"epoch": 0.7753357753357754,
"grad_norm": 0.08224555292780504,
"learning_rate": 0.00014607003929344492,
"loss": 0.8582,
"step": 635
},
{
"epoch": 0.7814407814407814,
"grad_norm": 0.08581860223147199,
"learning_rate": 0.00013862361001766972,
"loss": 0.8528,
"step": 640
},
{
"epoch": 0.7875457875457875,
"grad_norm": 0.11871698275680992,
"learning_rate": 0.00013134133347316885,
"loss": 0.8437,
"step": 645
},
{
"epoch": 0.7936507936507936,
"grad_norm": 0.08356164913957449,
"learning_rate": 0.0001242265175838072,
"loss": 0.845,
"step": 650
},
{
"epoch": 0.7997557997557998,
"grad_norm": 0.08792516275954312,
"learning_rate": 0.00011728239420560316,
"loss": 0.8504,
"step": 655
},
{
"epoch": 0.8058608058608059,
"grad_norm": 0.0839189452160314,
"learning_rate": 0.0001105121176586793,
"loss": 0.8492,
"step": 660
},
{
"epoch": 0.811965811965812,
"grad_norm": 0.07899895706535247,
"learning_rate": 0.00010391876329443534,
"loss": 0.846,
"step": 665
},
{
"epoch": 0.818070818070818,
"grad_norm": 0.09342601930472154,
"learning_rate": 9.750532609858991e-05,
"loss": 0.8513,
"step": 670
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.08506450364705921,
"learning_rate": 9.127471933073007e-05,
"loss": 0.8551,
"step": 675
},
{
"epoch": 0.8302808302808303,
"grad_norm": 0.08591775034557968,
"learning_rate": 8.522977320098224e-05,
"loss": 0.8457,
"step": 680
},
{
"epoch": 0.8363858363858364,
"grad_norm": 0.08883987592383243,
"learning_rate": 7.937323358440934e-05,
"loss": 0.8481,
"step": 685
},
{
"epoch": 0.8424908424908425,
"grad_norm": 0.09241014546560397,
"learning_rate": 7.370776077371622e-05,
"loss": 0.842,
"step": 690
},
{
"epoch": 0.8485958485958486,
"grad_norm": 0.08970361710526982,
"learning_rate": 6.82359282708292e-05,
"loss": 0.8515,
"step": 695
},
{
"epoch": 0.8547008547008547,
"grad_norm": 0.07963231871623425,
"learning_rate": 6.296022161790149e-05,
"loss": 0.8542,
"step": 700
},
{
"epoch": 0.8608058608058609,
"grad_norm": 0.08013240789229244,
"learning_rate": 5.78830372682721e-05,
"loss": 0.8467,
"step": 705
},
{
"epoch": 0.8669108669108669,
"grad_norm": 0.0759688508386286,
"learning_rate": 5.300668149789417e-05,
"loss": 0.847,
"step": 710
},
{
"epoch": 0.873015873015873,
"grad_norm": 0.07886102369723931,
"learning_rate": 4.833336935772442e-05,
"loss": 0.8425,
"step": 715
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.08173627877043077,
"learning_rate": 4.386522366755169e-05,
"loss": 0.8392,
"step": 720
},
{
"epoch": 0.8852258852258852,
"grad_norm": 0.0752411428468848,
"learning_rate": 3.960427405172079e-05,
"loss": 0.8469,
"step": 725
},
{
"epoch": 0.8913308913308914,
"grad_norm": 0.08853754561324295,
"learning_rate": 3.5552456017189926e-05,
"loss": 0.8477,
"step": 730
},
{
"epoch": 0.8974358974358975,
"grad_norm": 0.07850504504773986,
"learning_rate": 3.171161007433937e-05,
"loss": 0.8428,
"step": 735
},
{
"epoch": 0.9035409035409036,
"grad_norm": 0.0779225305884226,
"learning_rate": 2.808348090093277e-05,
"loss": 0.8496,
"step": 740
},
{
"epoch": 0.9096459096459096,
"grad_norm": 0.08610580397739691,
"learning_rate": 2.466971654960931e-05,
"loss": 0.8485,
"step": 745
},
{
"epoch": 0.9157509157509157,
"grad_norm": 0.08126442173982612,
"learning_rate": 2.147186769926712e-05,
"loss": 0.8404,
"step": 750
},
{
"epoch": 0.9218559218559218,
"grad_norm": 0.07309643357333985,
"learning_rate": 1.8491386950677812e-05,
"loss": 0.8441,
"step": 755
},
{
"epoch": 0.927960927960928,
"grad_norm": 0.09343176187392903,
"learning_rate": 1.572962816665302e-05,
"loss": 0.8433,
"step": 760
},
{
"epoch": 0.9340659340659341,
"grad_norm": 0.07826041939717969,
"learning_rate": 1.3187845857061508e-05,
"loss": 0.8359,
"step": 765
},
{
"epoch": 0.9401709401709402,
"grad_norm": 0.08304159121172228,
"learning_rate": 1.0867194608976228e-05,
"loss": 0.8591,
"step": 770
},
{
"epoch": 0.9462759462759462,
"grad_norm": 0.08259928200183242,
"learning_rate": 8.768728562211947e-06,
"loss": 0.8454,
"step": 775
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.08219547432164107,
"learning_rate": 6.893400930488569e-06,
"loss": 0.8486,
"step": 780
},
{
"epoch": 0.9584859584859585,
"grad_norm": 0.07862593185208377,
"learning_rate": 5.242063568441313e-06,
"loss": 0.8483,
"step": 785
},
{
"epoch": 0.9645909645909646,
"grad_norm": 0.07766701411413329,
"learning_rate": 3.815466584670746e-06,
"loss": 0.8408,
"step": 790
},
{
"epoch": 0.9706959706959707,
"grad_norm": 0.07999354583215297,
"learning_rate": 2.6142580010117823e-06,
"loss": 0.8455,
"step": 795
},
{
"epoch": 0.9768009768009768,
"grad_norm": 0.07593923199320168,
"learning_rate": 1.6389834581739814e-06,
"loss": 0.8475,
"step": 800
},
{
"epoch": 0.9829059829059829,
"grad_norm": 0.09079049550630383,
"learning_rate": 8.900859678879769e-07,
"loss": 0.8463,
"step": 805
},
{
"epoch": 0.989010989010989,
"grad_norm": 0.08014122984645816,
"learning_rate": 3.6790571167061305e-07,
"loss": 0.8472,
"step": 810
},
{
"epoch": 0.9951159951159951,
"grad_norm": 0.08774528880462228,
"learning_rate": 7.26798862996092e-08,
"loss": 0.8393,
"step": 815
},
{
"epoch": 1.0,
"eval_loss": 1.2005486488342285,
"eval_runtime": 120.1446,
"eval_samples_per_second": 174.723,
"eval_steps_per_second": 5.46,
"step": 819
},
{
"epoch": 1.0,
"step": 819,
"total_flos": 80357621760000.0,
"train_loss": 0.9138513950492291,
"train_runtime": 1997.4789,
"train_samples_per_second": 52.475,
"train_steps_per_second": 0.41
}
],
"logging_steps": 5,
"max_steps": 819,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 80357621760000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}