{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9956458635703918,
"eval_steps": 500,
"global_step": 516,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05805515239477504,
"grad_norm": 0.600770890712738,
"learning_rate": 2.9999908009303982e-05,
"loss": 2.8908,
"step": 10
},
{
"epoch": 0.11611030478955008,
"grad_norm": 0.3810243308544159,
"learning_rate": 2.998887064264453e-05,
"loss": 2.6786,
"step": 20
},
{
"epoch": 0.1741654571843251,
"grad_norm": 0.2603137493133545,
"learning_rate": 2.9959452370912904e-05,
"loss": 2.5845,
"step": 30
},
{
"epoch": 0.23222060957910015,
"grad_norm": 0.2289140671491623,
"learning_rate": 2.9911693281561532e-05,
"loss": 2.4006,
"step": 40
},
{
"epoch": 0.29027576197387517,
"grad_norm": 0.18332642316818237,
"learning_rate": 2.9845658454559736e-05,
"loss": 2.2503,
"step": 50
},
{
"epoch": 0.3483309143686502,
"grad_norm": 0.21847300231456757,
"learning_rate": 2.9761437873711118e-05,
"loss": 2.0564,
"step": 60
},
{
"epoch": 0.40638606676342526,
"grad_norm": 0.2699638307094574,
"learning_rate": 2.9659146304035112e-05,
"loss": 1.8541,
"step": 70
},
{
"epoch": 0.4644412191582003,
"grad_norm": 0.17481808364391327,
"learning_rate": 2.953892313537998e-05,
"loss": 1.5994,
"step": 80
},
{
"epoch": 0.5224963715529753,
"grad_norm": 0.12553584575653076,
"learning_rate": 2.9400932192480125e-05,
"loss": 1.4072,
"step": 90
},
{
"epoch": 0.5805515239477503,
"grad_norm": 0.12803253531455994,
"learning_rate": 2.9245361511716764e-05,
"loss": 1.3509,
"step": 100
},
{
"epoch": 0.6386066763425254,
"grad_norm": 0.0883030965924263,
"learning_rate": 2.907242308488601e-05,
"loss": 1.3609,
"step": 110
},
{
"epoch": 0.6966618287373004,
"grad_norm": 0.08672440052032471,
"learning_rate": 2.8882352570323616e-05,
"loss": 1.3838,
"step": 120
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.08096282929182053,
"learning_rate": 2.8675408971779997e-05,
"loss": 1.3488,
"step": 130
},
{
"epoch": 0.8127721335268505,
"grad_norm": 0.08677352964878082,
"learning_rate": 2.8451874285483073e-05,
"loss": 1.3902,
"step": 140
},
{
"epoch": 0.8708272859216255,
"grad_norm": 0.06741151958703995,
"learning_rate": 2.8212053115869935e-05,
"loss": 1.319,
"step": 150
},
{
"epoch": 0.9288824383164006,
"grad_norm": 0.0820663794875145,
"learning_rate": 2.7956272260510948e-05,
"loss": 1.3725,
"step": 160
},
{
"epoch": 0.9869375907111756,
"grad_norm": 0.06830213963985443,
"learning_rate": 2.768488026479187e-05,
"loss": 1.3809,
"step": 170
},
{
"epoch": 1.0449927431059507,
"grad_norm": 0.08063437789678574,
"learning_rate": 2.7398246946960864e-05,
"loss": 1.286,
"step": 180
},
{
"epoch": 1.1030478955007257,
"grad_norm": 0.07576021552085876,
"learning_rate": 2.7096762894187533e-05,
"loss": 1.3281,
"step": 190
},
{
"epoch": 1.1611030478955007,
"grad_norm": 0.06808394193649292,
"learning_rate": 2.678083893032079e-05,
"loss": 1.3432,
"step": 200
},
{
"epoch": 1.2191582002902757,
"grad_norm": 0.06429944187402725,
"learning_rate": 2.6450905556070718e-05,
"loss": 1.3108,
"step": 210
},
{
"epoch": 1.2772133526850509,
"grad_norm": 0.07570013403892517,
"learning_rate": 2.610741236237734e-05,
"loss": 1.2846,
"step": 220
},
{
"epoch": 1.3352685050798259,
"grad_norm": 0.0730566531419754,
"learning_rate": 2.5750827417765714e-05,
"loss": 1.2743,
"step": 230
},
{
"epoch": 1.3933236574746009,
"grad_norm": 0.07746976613998413,
"learning_rate": 2.5381636630522056e-05,
"loss": 1.2424,
"step": 240
},
{
"epoch": 1.4513788098693758,
"grad_norm": 0.063927061855793,
"learning_rate": 2.50003430865602e-05,
"loss": 1.3057,
"step": 250
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.06818998605012894,
"learning_rate": 2.4607466363880555e-05,
"loss": 1.2931,
"step": 260
},
{
"epoch": 1.567489114658926,
"grad_norm": 0.07596269994974136,
"learning_rate": 2.4203541824555744e-05,
"loss": 1.3029,
"step": 270
},
{
"epoch": 1.625544267053701,
"grad_norm": 0.07179366797208786,
"learning_rate": 2.378911988520776e-05,
"loss": 1.2844,
"step": 280
},
{
"epoch": 1.683599419448476,
"grad_norm": 0.06046036258339882,
"learning_rate": 2.33647652669707e-05,
"loss": 1.3078,
"step": 290
},
{
"epoch": 1.741654571843251,
"grad_norm": 0.07098483294248581,
"learning_rate": 2.2931056225961137e-05,
"loss": 1.3307,
"step": 300
},
{
"epoch": 1.799709724238026,
"grad_norm": 0.07308895885944366,
"learning_rate": 2.2488583765304748e-05,
"loss": 1.2819,
"step": 310
},
{
"epoch": 1.8577648766328012,
"grad_norm": 0.06851565092802048,
"learning_rate": 2.203795082979298e-05,
"loss": 1.267,
"step": 320
},
{
"epoch": 1.9158200290275762,
"grad_norm": 0.07384185492992401,
"learning_rate": 2.1579771484267097e-05,
"loss": 1.2629,
"step": 330
},
{
"epoch": 1.9738751814223512,
"grad_norm": 0.0664062350988388,
"learning_rate": 2.111467007684928e-05,
"loss": 1.2841,
"step": 340
},
{
"epoch": 2.0319303338171264,
"grad_norm": 0.07113119959831238,
"learning_rate": 2.0643280388160994e-05,
"loss": 1.2779,
"step": 350
},
{
"epoch": 2.0899854862119014,
"grad_norm": 0.07706930488348007,
"learning_rate": 2.0166244767687922e-05,
"loss": 1.2854,
"step": 360
},
{
"epoch": 2.1480406386066764,
"grad_norm": 0.06886892020702362,
"learning_rate": 1.968421325846838e-05,
"loss": 1.2983,
"step": 370
},
{
"epoch": 2.2060957910014514,
"grad_norm": 0.07846487313508987,
"learning_rate": 1.919784271129792e-05,
"loss": 1.286,
"step": 380
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.09121862053871155,
"learning_rate": 1.8707795889657207e-05,
"loss": 1.2774,
"step": 390
},
{
"epoch": 2.3222060957910013,
"grad_norm": 0.07675393670797348,
"learning_rate": 1.8214740566582862e-05,
"loss": 1.2635,
"step": 400
},
{
"epoch": 2.3802612481857763,
"grad_norm": 0.06239638477563858,
"learning_rate": 1.7719348614711877e-05,
"loss": 1.2833,
"step": 410
},
{
"epoch": 2.4383164005805513,
"grad_norm": 0.0633472129702568,
"learning_rate": 1.7222295090739667e-05,
"loss": 1.2657,
"step": 420
},
{
"epoch": 2.4963715529753268,
"grad_norm": 0.0839238315820694,
"learning_rate": 1.672425731553926e-05,
"loss": 1.2424,
"step": 430
},
{
"epoch": 2.5544267053701017,
"grad_norm": 0.061606671661138535,
"learning_rate": 1.6225913951195186e-05,
"loss": 1.2802,
"step": 440
},
{
"epoch": 2.6124818577648767,
"grad_norm": 0.07224655896425247,
"learning_rate": 1.5727944076209725e-05,
"loss": 1.2556,
"step": 450
},
{
"epoch": 2.6705370101596517,
"grad_norm": 0.07351450622081757,
"learning_rate": 1.5231026260141692e-05,
"loss": 1.292,
"step": 460
},
{
"epoch": 2.7285921625544267,
"grad_norm": 0.06462671607732773,
"learning_rate": 1.4735837638938767e-05,
"loss": 1.2588,
"step": 470
},
{
"epoch": 2.7866473149492017,
"grad_norm": 0.0673007071018219,
"learning_rate": 1.4243052992223376e-05,
"loss": 1.2889,
"step": 480
},
{
"epoch": 2.8447024673439767,
"grad_norm": 0.06969607621431351,
"learning_rate": 1.3753343823789447e-05,
"loss": 1.2591,
"step": 490
},
{
"epoch": 2.9027576197387517,
"grad_norm": 0.07535108923912048,
"learning_rate": 1.3267377446563042e-05,
"loss": 1.3026,
"step": 500
},
{
"epoch": 2.9608127721335267,
"grad_norm": 0.06275378167629242,
"learning_rate": 1.2785816073273818e-05,
"loss": 1.2005,
"step": 510
}
],
"logging_steps": 10,
"max_steps": 860,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.381176202297344e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
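
For reference, the log_history above can be read back with a short script like the following. This is a minimal sketch, not part of the checkpoint itself: the file name trainer_state.json and its location alongside the checkpoint are assumptions, and the summary printed is only an illustration of how the logged loss entries are structured.

import json

# Load the Trainer state written with this checkpoint
# (path is an assumption; point it at the checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry in this file carries epoch, step, loss,
# grad_norm and learning_rate; keep only entries that logged a loss.
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.2f}")
print(f"logged steps: {steps[0]}..{steps[-1]} every {state['logging_steps']} steps")
print(f"loss went from {losses[0]:.4f} to {losses[-1]:.4f}")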