gliclass-base-v1.0-init / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.9988002399520095,
"eval_steps": 500,
"global_step": 40000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07498500299940011,
"grad_norm": 32.0,
"learning_rate": 6.248437890527369e-06,
"loss": 1.2095,
"step": 500
},
{
"epoch": 0.14997000599880023,
"grad_norm": 16.5,
"learning_rate": 1.2496875781054738e-05,
"loss": 0.8665,
"step": 1000
},
{
"epoch": 0.22495500899820037,
"grad_norm": 17.75,
"learning_rate": 1.8745313671582104e-05,
"loss": 0.6844,
"step": 1500
},
{
"epoch": 0.29994001199760045,
"grad_norm": 7.78125,
"learning_rate": 2.4993751562109475e-05,
"loss": 0.5099,
"step": 2000
},
{
"epoch": 0.3749250149970006,
"grad_norm": 7.375,
"learning_rate": 3.124218945263684e-05,
"loss": 0.2845,
"step": 2500
},
{
"epoch": 0.44991001799640074,
"grad_norm": 8.0,
"learning_rate": 3.749062734316421e-05,
"loss": 0.2251,
"step": 3000
},
{
"epoch": 0.5248950209958009,
"grad_norm": 6.1875,
"learning_rate": 4.373906523369158e-05,
"loss": 0.206,
"step": 3500
},
{
"epoch": 0.5998800239952009,
"grad_norm": 3.640625,
"learning_rate": 4.998750312421895e-05,
"loss": 0.1874,
"step": 4000
},
{
"epoch": 0.674865026994601,
"grad_norm": 4.8125,
"learning_rate": 4.997630981947256e-05,
"loss": 0.1793,
"step": 4500
},
{
"epoch": 0.7498500299940012,
"grad_norm": 3.953125,
"learning_rate": 4.9905094389741605e-05,
"loss": 0.1708,
"step": 5000
},
{
"epoch": 0.8248350329934013,
"grad_norm": 6.125,
"learning_rate": 4.978648912557427e-05,
"loss": 0.1614,
"step": 5500
},
{
"epoch": 0.8998200359928015,
"grad_norm": 3.15625,
"learning_rate": 4.962071971107133e-05,
"loss": 0.1581,
"step": 6000
},
{
"epoch": 0.9748050389922016,
"grad_norm": 3.984375,
"learning_rate": 4.940810157507576e-05,
"loss": 0.154,
"step": 6500
},
{
"epoch": 1.0,
"eval_accuracy": 0.6357488330240144,
"eval_f1": 0.5235656092218528,
"eval_loss": 0.1340431272983551,
"eval_precision": 0.4623448805170602,
"eval_recall": 0.6357488330240144,
"eval_runtime": 40.157,
"eval_samples_per_second": 295.191,
"eval_steps_per_second": 18.453,
"step": 6668
},
{
"epoch": 1.0497900419916018,
"grad_norm": 2.796875,
"learning_rate": 4.914903929096945e-05,
"loss": 0.1466,
"step": 7000
},
{
"epoch": 1.124775044991002,
"grad_norm": 3.8125,
"learning_rate": 4.884402580684407e-05,
"loss": 0.1503,
"step": 7500
},
{
"epoch": 1.1997600479904018,
"grad_norm": 2.90625,
"learning_rate": 4.8493641507511146e-05,
"loss": 0.1463,
"step": 8000
},
{
"epoch": 1.274745050989802,
"grad_norm": 5.15625,
"learning_rate": 4.809855311013604e-05,
"loss": 0.1473,
"step": 8500
},
{
"epoch": 1.349730053989202,
"grad_norm": 3.3125,
"learning_rate": 4.765951239559725e-05,
"loss": 0.1448,
"step": 9000
},
{
"epoch": 1.4247150569886022,
"grad_norm": 5.125,
"learning_rate": 4.717735477798505e-05,
"loss": 0.1381,
"step": 9500
},
{
"epoch": 1.4997000599880024,
"grad_norm": 3.046875,
"learning_rate": 4.665299771496145e-05,
"loss": 0.1413,
"step": 10000
},
{
"epoch": 1.5746850629874025,
"grad_norm": 4.40625,
"learning_rate": 4.608743896200624e-05,
"loss": 0.1419,
"step": 10500
},
{
"epoch": 1.6496700659868027,
"grad_norm": 3.984375,
"learning_rate": 4.548175467387103e-05,
"loss": 0.1407,
"step": 11000
},
{
"epoch": 1.7246550689862028,
"grad_norm": 4.34375,
"learning_rate": 4.483709735685378e-05,
"loss": 0.1394,
"step": 11500
},
{
"epoch": 1.799640071985603,
"grad_norm": 4.09375,
"learning_rate": 4.415469367579033e-05,
"loss": 0.1379,
"step": 12000
},
{
"epoch": 1.874625074985003,
"grad_norm": 2.0,
"learning_rate": 4.343584211993589e-05,
"loss": 0.1377,
"step": 12500
},
{
"epoch": 1.9496100779844032,
"grad_norm": 2.96875,
"learning_rate": 4.268191053217765e-05,
"loss": 0.1357,
"step": 13000
},
{
"epoch": 2.0,
"eval_accuracy": 0.6386662729880209,
"eval_f1": 0.525822096019018,
"eval_loss": 0.12524983286857605,
"eval_precision": 0.463306913237047,
"eval_recall": 0.6386662729880209,
"eval_runtime": 40.1169,
"eval_samples_per_second": 295.487,
"eval_steps_per_second": 18.471,
"step": 13336
},
{
"epoch": 2.0245950809838034,
"grad_norm": 5.4375,
"learning_rate": 4.189433350628029e-05,
"loss": 0.1366,
"step": 13500
},
{
"epoch": 2.0995800839832035,
"grad_norm": 5.875,
"learning_rate": 4.10746096571167e-05,
"loss": 0.1329,
"step": 14000
},
{
"epoch": 2.1745650869826036,
"grad_norm": 4.15625,
"learning_rate": 4.02242987690783e-05,
"loss": 0.1325,
"step": 14500
},
{
"epoch": 2.249550089982004,
"grad_norm": 3.71875,
"learning_rate": 3.9345018828090864e-05,
"loss": 0.1321,
"step": 15000
},
{
"epoch": 2.324535092981404,
"grad_norm": 2.796875,
"learning_rate": 3.843844294288368e-05,
"loss": 0.1332,
"step": 15500
},
{
"epoch": 2.3995200959808036,
"grad_norm": 3.203125,
"learning_rate": 3.750629616136989e-05,
"loss": 0.1281,
"step": 16000
},
{
"epoch": 2.4745050989802038,
"grad_norm": 2.921875,
"learning_rate": 3.6550352188196244e-05,
"loss": 0.1307,
"step": 16500
},
{
"epoch": 2.549490101979604,
"grad_norm": 4.34375,
"learning_rate": 3.557243000970787e-05,
"loss": 0.1345,
"step": 17000
},
{
"epoch": 2.624475104979004,
"grad_norm": 3.28125,
"learning_rate": 3.457439043275033e-05,
"loss": 0.129,
"step": 17500
},
{
"epoch": 2.699460107978404,
"grad_norm": 3.703125,
"learning_rate": 3.355813254389495e-05,
"loss": 0.135,
"step": 18000
},
{
"epoch": 2.7744451109778043,
"grad_norm": 2.96875,
"learning_rate": 3.252559009582478e-05,
"loss": 0.1336,
"step": 18500
},
{
"epoch": 2.8494301139772045,
"grad_norm": 3.6875,
"learning_rate": 3.14787278277573e-05,
"loss": 0.1327,
"step": 19000
},
{
"epoch": 2.9244151169766046,
"grad_norm": 4.34375,
"learning_rate": 3.0419537726905434e-05,
"loss": 0.1326,
"step": 19500
},
{
"epoch": 2.9994001199760048,
"grad_norm": 4.28125,
"learning_rate": 2.9350035238090666e-05,
"loss": 0.1295,
"step": 20000
},
{
"epoch": 3.0,
"eval_accuracy": 0.6393481806422586,
"eval_f1": 0.5262540403072158,
"eval_loss": 0.12466703355312347,
"eval_precision": 0.4630570055607114,
"eval_recall": 0.6393481806422586,
"eval_runtime": 40.1133,
"eval_samples_per_second": 295.513,
"eval_steps_per_second": 18.473,
"step": 20004
},
{
"epoch": 3.074385122975405,
"grad_norm": 4.625,
"learning_rate": 2.827225542872053e-05,
"loss": 0.1301,
"step": 20500
},
{
"epoch": 3.149370125974805,
"grad_norm": 3.4375,
"learning_rate": 2.7188249116427988e-05,
"loss": 0.1287,
"step": 21000
},
{
"epoch": 3.224355128974205,
"grad_norm": 4.0625,
"learning_rate": 2.6100078966740953e-05,
"loss": 0.1314,
"step": 21500
},
{
"epoch": 3.2993401319736053,
"grad_norm": 4.1875,
"learning_rate": 2.500981556820753e-05,
"loss": 0.1299,
"step": 22000
},
{
"epoch": 3.3743251349730055,
"grad_norm": 4.3125,
"learning_rate": 2.3919533492445064e-05,
"loss": 0.1298,
"step": 22500
},
{
"epoch": 3.4493101379724056,
"grad_norm": 4.25,
"learning_rate": 2.2831307346610255e-05,
"loss": 0.1293,
"step": 23000
},
{
"epoch": 3.5242951409718057,
"grad_norm": 3.640625,
"learning_rate": 2.17472078258016e-05,
"loss": 0.1261,
"step": 23500
},
{
"epoch": 3.599280143971206,
"grad_norm": 3.75,
"learning_rate": 2.066929777290578e-05,
"loss": 0.1298,
"step": 24000
},
{
"epoch": 3.674265146970606,
"grad_norm": 4.9375,
"learning_rate": 1.9599628253385327e-05,
"loss": 0.1302,
"step": 24500
},
{
"epoch": 3.749250149970006,
"grad_norm": 3.4375,
"learning_rate": 1.8540234652476617e-05,
"loss": 0.1295,
"step": 25000
},
{
"epoch": 3.824235152969406,
"grad_norm": 3.875,
"learning_rate": 1.7493132802224482e-05,
"loss": 0.1285,
"step": 25500
},
{
"epoch": 3.8992201559688064,
"grad_norm": 4.15625,
"learning_rate": 1.6460315145722894e-05,
"loss": 0.1272,
"step": 26000
},
{
"epoch": 3.974205158968206,
"grad_norm": 3.890625,
"learning_rate": 1.5443746945860566e-05,
"loss": 0.1294,
"step": 26500
},
{
"epoch": 4.0,
"eval_accuracy": 0.639137281367752,
"eval_f1": 0.5262984000216192,
"eval_loss": 0.12251855432987213,
"eval_precision": 0.4640513156681777,
"eval_recall": 0.639137281367752,
"eval_runtime": 40.1904,
"eval_samples_per_second": 294.946,
"eval_steps_per_second": 18.437,
"step": 26672
},
{
"epoch": 4.049190161967607,
"grad_norm": 3.1875,
"learning_rate": 1.4445362545785581e-05,
"loss": 0.1311,
"step": 27000
},
{
"epoch": 4.124175164967006,
"grad_norm": 3.390625,
"learning_rate": 1.3467061688204524e-05,
"loss": 0.1289,
"step": 27500
},
{
"epoch": 4.199160167966407,
"grad_norm": 6.0625,
"learning_rate": 1.2510705900519926e-05,
"loss": 0.1263,
"step": 28000
},
{
"epoch": 4.274145170965807,
"grad_norm": 4.15625,
"learning_rate": 1.1578114952684529e-05,
"loss": 0.1273,
"step": 28500
},
{
"epoch": 4.349130173965207,
"grad_norm": 4.21875,
"learning_rate": 1.0671063394512279e-05,
"loss": 0.1339,
"step": 29000
},
{
"epoch": 4.424115176964607,
"grad_norm": 4.5,
"learning_rate": 9.791277179034853e-06,
"loss": 0.1286,
"step": 29500
},
{
"epoch": 4.499100179964008,
"grad_norm": 3.921875,
"learning_rate": 8.940430378329174e-06,
"loss": 0.133,
"step": 30000
},
{
"epoch": 4.574085182963407,
"grad_norm": 2.828125,
"learning_rate": 8.120141998064757e-06,
"loss": 0.1241,
"step": 30500
},
{
"epoch": 4.649070185962808,
"grad_norm": 3.203125,
"learning_rate": 7.331972896832292e-06,
"loss": 0.1269,
"step": 31000
},
{
"epoch": 4.7240551889622076,
"grad_norm": 6.40625,
"learning_rate": 6.577422816115633e-06,
"loss": 0.1303,
"step": 31500
},
{
"epoch": 4.799040191961607,
"grad_norm": 3.328125,
"learning_rate": 5.857927526558302e-06,
"loss": 0.1306,
"step": 32000
},
{
"epoch": 4.874025194961008,
"grad_norm": 2.96875,
"learning_rate": 5.174856095954883e-06,
"loss": 0.129,
"step": 32500
},
{
"epoch": 4.9490101979604075,
"grad_norm": 2.65625,
"learning_rate": 4.529508284165662e-06,
"loss": 0.1271,
"step": 33000
},
{
"epoch": 5.0,
"eval_accuracy": 0.6391864911984703,
"eval_f1": 0.5263388946843862,
"eval_loss": 0.12287881225347519,
"eval_precision": 0.464080708996073,
"eval_recall": 0.6391864911984703,
"eval_runtime": 40.1821,
"eval_samples_per_second": 295.007,
"eval_steps_per_second": 18.441,
"step": 33340
},
{
"epoch": 5.023995200959808,
"grad_norm": 2.296875,
"learning_rate": 3.923112069911616e-06,
"loss": 0.1263,
"step": 33500
},
{
"epoch": 5.098980203959208,
"grad_norm": 8.375,
"learning_rate": 3.3568213141557586e-06,
"loss": 0.1289,
"step": 34000
},
{
"epoch": 5.173965206958608,
"grad_norm": 3.21875,
"learning_rate": 2.8317135645169912e-06,
"loss": 0.1291,
"step": 34500
},
{
"epoch": 5.248950209958008,
"grad_norm": 4.1875,
"learning_rate": 2.3487880048942728e-06,
"loss": 0.1271,
"step": 35000
},
{
"epoch": 5.323935212957409,
"grad_norm": 3.765625,
"learning_rate": 1.9089635542026586e-06,
"loss": 0.1306,
"step": 35500
},
{
"epoch": 5.398920215956808,
"grad_norm": 6.21875,
"learning_rate": 1.5130771178388153e-06,
"loss": 0.1287,
"step": 36000
},
{
"epoch": 5.473905218956209,
"grad_norm": 4.65625,
"learning_rate": 1.1618819952033066e-06,
"loss": 0.1268,
"step": 36500
},
{
"epoch": 5.548890221955609,
"grad_norm": 3.625,
"learning_rate": 8.560464463097773e-07,
"loss": 0.1293,
"step": 37000
},
{
"epoch": 5.623875224955009,
"grad_norm": 4.28125,
"learning_rate": 5.961524202084901e-07,
"loss": 0.1292,
"step": 37500
},
{
"epoch": 5.698860227954409,
"grad_norm": 3.03125,
"learning_rate": 3.826944476438388e-07,
"loss": 0.1311,
"step": 38000
},
{
"epoch": 5.7738452309538095,
"grad_norm": 3.609375,
"learning_rate": 2.1607870005291575e-07,
"loss": 0.1269,
"step": 38500
},
{
"epoch": 5.848830233953209,
"grad_norm": 4.6875,
"learning_rate": 9.662221669560767e-08,
"loss": 0.1273,
"step": 39000
},
{
"epoch": 5.92381523695261,
"grad_norm": 3.15625,
"learning_rate": 2.4552301386951415e-08,
"loss": 0.1312,
"step": 39500
},
{
"epoch": 5.9988002399520095,
"grad_norm": 4.78125,
"learning_rate": 6.089979592838191e-12,
"loss": 0.1285,
"step": 40000
}
],
"logging_steps": 500,
"max_steps": 40008,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 1000,
"total_flos": 1.7186915377741824e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
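The JSON above is a standard Hugging Face Trainer state file: "log_history" holds one entry per logging step (every 500 steps here) plus one eval entry at the end of each epoch. A minimal sketch of how one might inspect it, assuming the file is saved locally as trainer_state.json and that matplotlib is installed:

    # Sketch: load the Trainer state, split training-loss entries from
    # epoch-level eval entries, plot the loss curves, and print the
    # per-epoch eval metrics. File name and output path are assumptions.
    import json

    import matplotlib.pyplot as plt

    with open("trainer_state.json") as f:
        state = json.load(f)

    train_logs = [e for e in state["log_history"] if "loss" in e]
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

    plt.plot([e["step"] for e in train_logs],
             [e["loss"] for e in train_logs],
             label="train loss")
    plt.plot([e["step"] for e in eval_logs],
             [e["eval_loss"] for e in eval_logs],
             marker="o", label="eval loss")
    plt.xlabel("global step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curve.png")

    # Quick per-epoch summary of the evaluation metrics.
    for e in eval_logs:
        print(f"epoch {e['epoch']:.0f}: "
              f"f1={e['eval_f1']:.4f}, "
              f"accuracy={e['eval_accuracy']:.4f}, "
              f"eval_loss={e['eval_loss']:.4f}")

Run against this file, the summary loop would print six lines (epochs 1 through 6 up to the final partial epoch's checkpoint), showing eval_loss dropping from about 0.134 at epoch 1 to about 0.123 from epoch 4 onward while eval_f1 stays near 0.526.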