{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 2226,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.00019999751023913506,
      "loss": 1.726,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019999004108051845,
      "loss": 1.6889,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019997759289607853,
      "loss": 1.7184,
      "step": 30
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019996016630567534,
      "loss": 1.4497,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019993776217706971,
      "loss": 1.4543,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019991038162588018,
      "loss": 1.5685,
      "step": 60
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019987802601552718,
      "loss": 1.5524,
      "step": 70
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019984069695716533,
      "loss": 1.477,
      "step": 80
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001997983963096033,
      "loss": 1.4757,
      "step": 90
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019975112617921098,
      "loss": 1.4486,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001996988889198147,
      "loss": 1.461,
      "step": 110
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001996416871325803,
      "loss": 1.4589,
      "step": 120
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019957952366588306,
      "loss": 1.4624,
      "step": 130
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001995124016151664,
      "loss": 1.399,
      "step": 140
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001994403243227874,
      "loss": 1.424,
      "step": 150
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019936329537785052,
      "loss": 1.3211,
      "step": 160
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019928131861602876,
      "loss": 1.279,
      "step": 170
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001991943981193728,
      "loss": 1.3186,
      "step": 180
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00019910253821610771,
      "loss": 1.2727,
      "step": 190
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00019900574348041727,
      "loss": 1.3562,
      "step": 200
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001989040187322164,
      "loss": 1.4529,
      "step": 210
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00019879736903691107,
      "loss": 1.2631,
      "step": 220
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019868579970514598,
      "loss": 1.285,
      "step": 230
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00019856931629254028,
      "loss": 1.3466,
      "step": 240
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00019844792459941082,
      "loss": 1.3003,
      "step": 250
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00019832163067048334,
      "loss": 1.2173,
      "step": 260
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019819044079459142,
      "loss": 1.2895,
      "step": 270
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019805436150436352,
      "loss": 1.3731,
      "step": 280
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00019791339957589738,
      "loss": 1.1251,
      "step": 290
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019776756202842294,
      "loss": 1.2362,
      "step": 300
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0001976168561239525,
      "loss": 1.3371,
      "step": 310
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019746128936691938,
      "loss": 1.2906,
      "step": 320
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019730086950380403,
      "loss": 1.1032,
      "step": 330
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00019713560452274838,
      "loss": 1.1211,
      "step": 340
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00019696550265315806,
      "loss": 1.1438,
      "step": 350
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00019679057236529266,
      "loss": 1.2266,
      "step": 360
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00019661082236984386,
      "loss": 1.3422,
      "step": 370
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00019642626161750176,
      "loss": 1.3473,
      "step": 380
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00019623689929850908,
      "loss": 1.2263,
      "step": 390
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001960427448422037,
      "loss": 1.0773,
      "step": 400
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019584380791654894,
      "loss": 1.099,
      "step": 410
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00019564009842765225,
      "loss": 1.1754,
      "step": 420
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00019543162651927188,
      "loss": 1.2885,
      "step": 430
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00019521840257231182,
      "loss": 1.2681,
      "step": 440
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00019500043720430482,
      "loss": 1.1195,
      "step": 450
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00019477774126888377,
      "loss": 1.1689,
      "step": 460
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00019455032585524116,
      "loss": 1.0643,
      "step": 470
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001943182022875769,
      "loss": 1.0415,
      "step": 480
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00019408138212453454,
      "loss": 1.1249,
      "step": 490
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00019383987715862553,
      "loss": 0.9767,
      "step": 500
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00019359369941564212,
      "loss": 1.0936,
      "step": 510
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0001933428611540585,
      "loss": 1.1746,
      "step": 520
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00019308737486442045,
      "loss": 1.0178,
      "step": 530
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00019282725326872323,
      "loss": 0.91,
      "step": 540
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00019256250931977826,
      "loss": 0.9748,
      "step": 550
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00019229315620056806,
      "loss": 1.1454,
      "step": 560
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0001920192073235896,
      "loss": 0.9785,
      "step": 570
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00019174067633018682,
      "loss": 1.0508,
      "step": 580
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00019145757708987105,
      "loss": 1.0429,
      "step": 590
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0001911699236996305,
      "loss": 0.8784,
      "step": 600
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00019087773048322818,
      "loss": 1.0023,
      "step": 610
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00019058101199048887,
      "loss": 0.8826,
      "step": 620
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00019027978299657436,
      "loss": 1.1243,
      "step": 630
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00018997405850124785,
      "loss": 1.1292,
      "step": 640
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.000189663853728127,
      "loss": 1.1113,
      "step": 650
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00018934918412392596,
      "loss": 0.9625,
      "step": 660
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00018903006535768598,
      "loss": 0.9192,
      "step": 670
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00018870651331999542,
      "loss": 1.1748,
      "step": 680
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00018837854412219826,
      "loss": 0.7678,
      "step": 690
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00018804617409559198,
      "loss": 0.9359,
      "step": 700
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001877094197906143,
      "loss": 0.7756,
      "step": 710
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00018736829797601903,
      "loss": 1.0965,
      "step": 720
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.000187022825638041,
      "loss": 0.7658,
      "step": 730
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00018667301997955037,
      "loss": 0.8303,
      "step": 740
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00018631889841919596,
      "loss": 0.6526,
      "step": 750
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00018596047859053777,
      "loss": 0.8849,
      "step": 760
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00018559777834116904,
      "loss": 0.8624,
      "step": 770
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00018523081573182753,
      "loss": 1.0776,
      "step": 780
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00018485960903549613,
      "loss": 1.0613,
      "step": 790
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00018448417673649293,
      "loss": 0.7977,
      "step": 800
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001841045375295508,
      "loss": 1.0102,
      "step": 810
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.0001837207103188866,
      "loss": 0.9193,
      "step": 820
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00018333271421725966,
      "loss": 0.791,
      "step": 830
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001829405685450202,
      "loss": 0.7282,
      "step": 840
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00018254429282914717,
      "loss": 0.909,
      "step": 850
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00018214390680227588,
      "loss": 0.7082,
      "step": 860
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00018173943040171567,
      "loss": 0.7878,
      "step": 870
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00018133088376845672,
      "loss": 0.7337,
      "step": 880
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001809182872461674,
      "loss": 0.8767,
      "step": 890
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00018050166138018127,
      "loss": 0.9053,
      "step": 900
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00018008102691647378,
      "loss": 0.9425,
      "step": 910
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.00017965640480062947,
      "loss": 0.8695,
      "step": 920
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001792278161767989,
      "loss": 0.8363,
      "step": 930
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00017879528238664568,
      "loss": 0.8174,
      "step": 940
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00017835882496828391,
      "loss": 0.8841,
      "step": 950
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00017791846565520558,
      "loss": 1.0871,
      "step": 960
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00017747422637519837,
      "loss": 0.8819,
      "step": 970
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00017702612924925376,
      "loss": 0.9717,
      "step": 980
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.0001765741965904655,
      "loss": 0.9332,
      "step": 990
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00017611845090291856,
      "loss": 0.9255,
      "step": 1000
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00017565891488056847,
      "loss": 0.8273,
      "step": 1010
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001751956114061113,
      "loss": 1.0004,
      "step": 1020
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00017472856354984427,
      "loss": 0.6283,
      "step": 1030
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00017425779456851684,
      "loss": 0.7763,
      "step": 1040
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00017378332790417273,
      "loss": 0.7186,
      "step": 1050
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00017330518718298264,
      "loss": 0.6694,
      "step": 1060
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00017282339621406764,
      "loss": 0.7898,
      "step": 1070
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00017233797898831373,
      "loss": 0.8441,
      "step": 1080
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00017184895967717716,
      "loss": 0.8105,
      "step": 1090
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00017135636263148078,
      "loss": 0.6729,
      "step": 1100
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00017086021238020153,
      "loss": 0.81,
      "step": 1110
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00017036053362924896,
      "loss": 0.5832,
      "step": 1120
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00016985735126023505,
      "loss": 0.597,
      "step": 1130
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00016935069032923526,
      "loss": 0.714,
      "step": 1140
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.0001688405760655407,
      "loss": 1.0678,
      "step": 1150
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.000168327033870402,
      "loss": 0.5946,
      "step": 1160
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00016781008931576432,
      "loss": 0.8521,
      "step": 1170
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00016728976814299413,
      "loss": 0.8702,
      "step": 1180
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0001667660962615973,
      "loss": 0.6877,
      "step": 1190
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.00016623909974792888,
      "loss": 0.787,
      "step": 1200
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00016570880484389488,
      "loss": 0.6594,
      "step": 1210
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00016517523795564528,
      "loss": 0.7966,
      "step": 1220
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00016463842565225914,
      "loss": 0.8111,
      "step": 1230
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00016409839466442178,
      "loss": 0.7495,
      "step": 1240
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00016355517188309363,
      "loss": 0.5614,
      "step": 1250
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00016300878435817113,
      "loss": 0.8317,
      "step": 1260
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00016245925929713977,
      "loss": 0.7025,
      "step": 1270
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00016190662406371937,
      "loss": 0.8666,
      "step": 1280
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.0001613509061765015,
      "loss": 0.6441,
      "step": 1290
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00016079213330757913,
      "loss": 0.491,
      "step": 1300
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00016023033328116862,
      "loss": 0.5865,
      "step": 1310
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00015966553407222443,
      "loss": 0.5936,
      "step": 1320
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00015909776380504585,
      "loss": 0.72,
      "step": 1330
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00015852705075187674,
      "loss": 0.6545,
      "step": 1340
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00015795342333149755,
      "loss": 0.4781,
      "step": 1350
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.0001573769101078104,
      "loss": 0.858,
      "step": 1360
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00015679753978841654,
      "loss": 0.712,
      "step": 1370
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00015621534122318683,
      "loss": 0.5954,
      "step": 1380
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0001556303434028254,
      "loss": 0.7352,
      "step": 1390
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00015504257545742584,
      "loss": 0.5647,
      "step": 1400
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0001544520666550207,
      "loss": 0.7925,
      "step": 1410
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00015385884640012408,
      "loss": 0.8434,
      "step": 1420
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00015326294423226755,
      "loss": 0.7084,
      "step": 1430
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00015266438982452897,
      "loss": 0.75,
      "step": 1440
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.0001520632129820552,
      "loss": 0.896,
      "step": 1450
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0001514594436405777,
      "loss": 0.664,
      "step": 1460
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00015085311186492206,
      "loss": 0.7149,
      "step": 1470
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00015024424784751078,
      "loss": 0.7642,
      "step": 1480
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.00014963288190685992,
      "loss": 0.7575,
      "step": 1490
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0001490190444860694,
      "loss": 0.6248,
      "step": 1500
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00014840276615130687,
      "loss": 0.5879,
      "step": 1510
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00014778407759028598,
      "loss": 0.8715,
      "step": 1520
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00014716300961073808,
      "loss": 0.689,
      "step": 1530
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00014653959313887813,
      "loss": 0.5215,
      "step": 1540
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00014591385921786484,
      "loss": 0.4937,
      "step": 1550
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.00014528583900625481,
      "loss": 0.6311,
      "step": 1560
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.00014465556377645086,
      "loss": 0.8815,
      "step": 1570
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.00014402306491314508,
      "loss": 0.6525,
      "step": 1580
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.00014338837391175582,
      "loss": 0.7482,
      "step": 1590
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00014275152237685937,
      "loss": 0.6613,
      "step": 1600
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00014211254202061633,
      "loss": 0.5247,
      "step": 1610
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00014147146466119234,
      "loss": 0.6503,
      "step": 1620
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00014082832222117388,
      "loss": 0.5727,
      "step": 1630
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00014018314672597848,
      "loss": 0.67,
      "step": 1640
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.00013953597030226007,
      "loss": 0.6613,
      "step": 1650
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0001388868251763094,
      "loss": 0.7015,
      "step": 1660
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.000138235743672449,
      "loss": 0.7483,
      "step": 1670
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.00013758275821142382,
      "loss": 0.6186,
      "step": 1680
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00013692790130878684,
      "loss": 0.582,
      "step": 1690
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00013627120557327982,
      "loss": 0.5636,
      "step": 1700
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.00013561270370520957,
      "loss": 0.5026,
      "step": 1710
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.00013495242849481974,
      "loss": 0.6765,
      "step": 1720
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00013429041282065788,
      "loss": 0.4437,
      "step": 1730
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.0001336266896479384,
      "loss": 0.5845,
      "step": 1740
    },
    {
      "epoch": 2.36,
      "learning_rate": 0.0001329612920269008,
      "loss": 0.4177,
      "step": 1750
    },
    {
      "epoch": 2.37,
      "learning_rate": 0.0001322942530911643,
      "loss": 0.6281,
      "step": 1760
    },
    {
      "epoch": 2.39,
      "learning_rate": 0.00013162560605607763,
      "loss": 0.6297,
      "step": 1770
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00013095538421706518,
      "loss": 0.7309,
      "step": 1780
    },
    {
      "epoch": 2.41,
      "learning_rate": 0.0001302836209479691,
      "loss": 0.5898,
      "step": 1790
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.00012961034969938731,
      "loss": 0.5945,
      "step": 1800
    },
    {
      "epoch": 2.44,
      "learning_rate": 0.00012893560399700798,
      "loss": 0.6987,
      "step": 1810
    },
    {
      "epoch": 2.45,
      "learning_rate": 0.0001282594174399399,
      "loss": 0.4476,
      "step": 1820
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.00012758182369903968,
      "loss": 0.6016,
      "step": 1830
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.00012690285651523488,
      "loss": 0.667,
      "step": 1840
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.00012622254969784394,
      "loss": 0.4903,
      "step": 1850
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.00012554093712289265,
      "loss": 0.4684,
      "step": 1860
    },
    {
      "epoch": 2.52,
      "learning_rate": 0.00012485805273142737,
      "loss": 0.5173,
      "step": 1870
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.00012417393052782468,
      "loss": 0.7519,
      "step": 1880
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.00012348860457809838,
      "loss": 0.5067,
      "step": 1890
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00012280210900820308,
      "loss": 0.6649,
      "step": 1900
    },
    {
      "epoch": 2.57,
      "learning_rate": 0.00012211447800233483,
      "loss": 0.5927,
      "step": 1910
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.00012142574580122903,
      "loss": 0.8222,
      "step": 1920
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.00012073594670045525,
      "loss": 0.4151,
      "step": 1930
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00012004511504870966,
      "loss": 0.5066,
      "step": 1940
    },
    {
      "epoch": 2.63,
      "learning_rate": 0.00011935328524610443,
      "loss": 0.4361,
      "step": 1950
    },
    {
      "epoch": 2.64,
      "learning_rate": 0.00011866049174245491,
      "loss": 0.5214,
      "step": 1960
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00011796676903556418,
      "loss": 0.5282,
      "step": 1970
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.00011727215166950519,
      "loss": 0.5023,
      "step": 1980
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00011657667423290055,
      "loss": 0.5332,
      "step": 1990
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.00011588037135720042,
      "loss": 0.3463,
      "step": 2000
    },
    {
      "epoch": 2.71,
      "learning_rate": 0.0001151832777149578,
      "loss": 0.448,
      "step": 2010
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.00011448542801810203,
      "loss": 0.6003,
      "step": 2020
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.00011378685701621045,
      "loss": 0.6724,
      "step": 2030
    },
    {
      "epoch": 2.75,
      "learning_rate": 0.00011308759949477785,
      "loss": 0.5553,
      "step": 2040
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00011238769027348452,
      "loss": 0.4584,
      "step": 2050
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.00011168716420446219,
      "loss": 0.4843,
      "step": 2060
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.00011098605617055871,
      "loss": 0.4809,
      "step": 2070
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00011028440108360092,
      "loss": 0.6288,
      "step": 2080
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.00010958223388265639,
      "loss": 0.5355,
      "step": 2090
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00010887958953229349,
      "loss": 0.5807,
      "step": 2100
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.00010817650302084027,
      "loss": 0.5534,
      "step": 2110
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00010747300935864243,
      "loss": 0.72,
      "step": 2120
    },
    {
      "epoch": 2.87,
      "learning_rate": 0.00010676914357631969,
      "loss": 0.6605,
      "step": 2130
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00010606494072302165,
      "loss": 0.4739,
      "step": 2140
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00010536043586468237,
      "loss": 0.3668,
      "step": 2150
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.00010465566408227438,
      "loss": 0.3007,
      "step": 2160
    },
    {
      "epoch": 2.92,
      "learning_rate": 0.00010395066047006179,
      "loss": 0.6202,
      "step": 2170
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.00010324546013385259,
      "loss": 0.592,
      "step": 2180
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.00010254009818925082,
      "loss": 0.5273,
      "step": 2190
    },
    {
      "epoch": 2.96,
      "learning_rate": 0.00010183460975990773,
      "loss": 0.616,
      "step": 2200
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.000101129029975773,
      "loss": 0.6444,
      "step": 2210
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.00010042339397134528,
      "loss": 0.4996,
      "step": 2220
    }
  ],
  "logging_steps": 10,
  "max_steps": 4452,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 1.163304048181248e+16,
  "trial_name": null,
  "trial_params": null
}