{ "best_metric": null, "best_model_checkpoint": null, "epoch": 23.369036027263874, "eval_steps": 500, "global_step": 1500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.31, "grad_norm": 0.48931118845939636, "learning_rate": 1.98125e-05, "loss": 1.7417, "step": 20 }, { "epoch": 0.62, "grad_norm": 0.56327223777771, "learning_rate": 1.9604166666666668e-05, "loss": 1.7626, "step": 40 }, { "epoch": 0.93, "grad_norm": 0.6054126620292664, "learning_rate": 1.9395833333333335e-05, "loss": 1.5774, "step": 60 }, { "epoch": 1.25, "grad_norm": 0.8035449385643005, "learning_rate": 1.9187500000000002e-05, "loss": 1.4772, "step": 80 }, { "epoch": 1.56, "grad_norm": 0.9972027540206909, "learning_rate": 1.897916666666667e-05, "loss": 1.409, "step": 100 }, { "epoch": 1.87, "grad_norm": 0.7526600360870361, "learning_rate": 1.8781250000000003e-05, "loss": 1.3088, "step": 120 }, { "epoch": 2.18, "grad_norm": 0.8370587825775146, "learning_rate": 1.8572916666666666e-05, "loss": 1.2086, "step": 140 }, { "epoch": 2.49, "grad_norm": 0.8337924480438232, "learning_rate": 1.8364583333333334e-05, "loss": 1.1321, "step": 160 }, { "epoch": 2.8, "grad_norm": 1.0028570890426636, "learning_rate": 1.815625e-05, "loss": 1.0439, "step": 180 }, { "epoch": 3.12, "grad_norm": 0.7663702368736267, "learning_rate": 1.7947916666666668e-05, "loss": 1.026, "step": 200 }, { "epoch": 3.43, "grad_norm": 0.8835870623588562, "learning_rate": 1.7739583333333335e-05, "loss": 0.9994, "step": 220 }, { "epoch": 3.74, "grad_norm": 1.010624885559082, "learning_rate": 1.7531250000000003e-05, "loss": 0.9381, "step": 240 }, { "epoch": 4.05, "grad_norm": 0.9645354151725769, "learning_rate": 1.7322916666666666e-05, "loss": 0.8667, "step": 260 }, { "epoch": 4.36, "grad_norm": 0.9926008582115173, "learning_rate": 1.7114583333333334e-05, "loss": 0.8729, "step": 280 }, { "epoch": 4.67, "grad_norm": 1.0364482402801514, "learning_rate": 1.690625e-05, "loss": 0.836, "step": 300 }, { "epoch": 4.99, "grad_norm": 1.3519001007080078, "learning_rate": 1.6697916666666668e-05, "loss": 0.8126, "step": 320 }, { "epoch": 5.3, "grad_norm": 1.2280508279800415, "learning_rate": 1.6489583333333335e-05, "loss": 0.7953, "step": 340 }, { "epoch": 5.61, "grad_norm": 0.8931779265403748, "learning_rate": 1.6281250000000003e-05, "loss": 0.7813, "step": 360 }, { "epoch": 5.92, "grad_norm": 2.7053027153015137, "learning_rate": 1.6072916666666667e-05, "loss": 0.7256, "step": 380 }, { "epoch": 6.23, "grad_norm": 1.411024808883667, "learning_rate": 1.5864583333333334e-05, "loss": 0.7336, "step": 400 }, { "epoch": 6.54, "grad_norm": 1.0111807584762573, "learning_rate": 1.565625e-05, "loss": 0.7207, "step": 420 }, { "epoch": 6.85, "grad_norm": 2.213623523712158, "learning_rate": 1.544791666666667e-05, "loss": 0.664, "step": 440 }, { "epoch": 7.17, "grad_norm": 1.3642323017120361, "learning_rate": 1.5239583333333334e-05, "loss": 0.6848, "step": 460 }, { "epoch": 7.48, "grad_norm": 1.3692028522491455, "learning_rate": 1.5031250000000001e-05, "loss": 0.6663, "step": 480 }, { "epoch": 7.79, "grad_norm": 1.9850131273269653, "learning_rate": 1.4822916666666667e-05, "loss": 0.6199, "step": 500 }, { "epoch": 8.1, "grad_norm": 1.6070563793182373, "learning_rate": 1.4614583333333334e-05, "loss": 0.6295, "step": 520 }, { "epoch": 8.41, "grad_norm": 3.226116418838501, "learning_rate": 1.4406250000000001e-05, "loss": 0.6227, "step": 540 }, { "epoch": 8.72, "grad_norm": 1.5464348793029785, "learning_rate": 
1.4197916666666667e-05, "loss": 0.589, "step": 560 }, { "epoch": 9.04, "grad_norm": 1.4290672540664673, "learning_rate": 1.3989583333333334e-05, "loss": 0.6263, "step": 580 }, { "epoch": 9.35, "grad_norm": 1.682243824005127, "learning_rate": 1.3781250000000001e-05, "loss": 0.5747, "step": 600 }, { "epoch": 9.66, "grad_norm": 1.6785274744033813, "learning_rate": 1.3572916666666667e-05, "loss": 0.5809, "step": 620 }, { "epoch": 9.97, "grad_norm": 2.156558036804199, "learning_rate": 1.3364583333333334e-05, "loss": 0.5725, "step": 640 }, { "epoch": 10.28, "grad_norm": 1.739134430885315, "learning_rate": 1.3156250000000001e-05, "loss": 0.549, "step": 660 }, { "epoch": 10.59, "grad_norm": 1.2140896320343018, "learning_rate": 1.2947916666666667e-05, "loss": 0.5493, "step": 680 }, { "epoch": 10.91, "grad_norm": 1.517707109451294, "learning_rate": 1.2739583333333334e-05, "loss": 0.5589, "step": 700 }, { "epoch": 11.22, "grad_norm": 1.8752249479293823, "learning_rate": 1.2531250000000001e-05, "loss": 0.5001, "step": 720 }, { "epoch": 11.53, "grad_norm": 1.8684614896774292, "learning_rate": 1.2322916666666667e-05, "loss": 0.52, "step": 740 }, { "epoch": 11.84, "grad_norm": 2.3552052974700928, "learning_rate": 1.2114583333333334e-05, "loss": 0.5479, "step": 760 }, { "epoch": 12.15, "grad_norm": 1.2074153423309326, "learning_rate": 1.1906250000000001e-05, "loss": 0.4977, "step": 780 }, { "epoch": 12.46, "grad_norm": 3.0007381439208984, "learning_rate": 1.1697916666666667e-05, "loss": 0.5252, "step": 800 }, { "epoch": 12.78, "grad_norm": 1.8956348896026611, "learning_rate": 1.1489583333333334e-05, "loss": 0.503, "step": 820 }, { "epoch": 13.09, "grad_norm": 1.3779544830322266, "learning_rate": 1.1281250000000001e-05, "loss": 0.4991, "step": 840 }, { "epoch": 13.4, "grad_norm": 1.5404250621795654, "learning_rate": 1.1072916666666667e-05, "loss": 0.5026, "step": 860 }, { "epoch": 13.71, "grad_norm": 2.149167060852051, "learning_rate": 1.0864583333333334e-05, "loss": 0.4788, "step": 880 }, { "epoch": 14.02, "grad_norm": 1.4978464841842651, "learning_rate": 1.0656250000000002e-05, "loss": 0.4605, "step": 900 }, { "epoch": 14.33, "grad_norm": 1.5203664302825928, "learning_rate": 1.0447916666666667e-05, "loss": 0.4858, "step": 920 }, { "epoch": 14.64, "grad_norm": 1.850074291229248, "learning_rate": 1.0239583333333334e-05, "loss": 0.4797, "step": 940 }, { "epoch": 14.96, "grad_norm": 1.7591063976287842, "learning_rate": 1.0031250000000002e-05, "loss": 0.4619, "step": 960 }, { "epoch": 15.27, "grad_norm": 1.4994142055511475, "learning_rate": 9.822916666666667e-06, "loss": 0.4627, "step": 980 }, { "epoch": 15.58, "grad_norm": 1.888311743736267, "learning_rate": 9.614583333333334e-06, "loss": 0.4469, "step": 1000 }, { "epoch": 15.89, "grad_norm": 1.7547377347946167, "learning_rate": 9.406250000000002e-06, "loss": 0.4379, "step": 1020 }, { "epoch": 16.2, "grad_norm": 1.353371262550354, "learning_rate": 9.197916666666667e-06, "loss": 0.4464, "step": 1040 }, { "epoch": 16.51, "grad_norm": 1.8960446119308472, "learning_rate": 8.989583333333334e-06, "loss": 0.4181, "step": 1060 }, { "epoch": 16.83, "grad_norm": 1.644089937210083, "learning_rate": 8.781250000000002e-06, "loss": 0.481, "step": 1080 }, { "epoch": 17.14, "grad_norm": 2.3077402114868164, "learning_rate": 8.572916666666667e-06, "loss": 0.4199, "step": 1100 }, { "epoch": 17.45, "grad_norm": 2.0317394733428955, "learning_rate": 8.364583333333334e-06, "loss": 0.4178, "step": 1120 }, { "epoch": 17.76, "grad_norm": 1.8424252271652222, "learning_rate": 
8.156250000000002e-06, "loss": 0.4355, "step": 1140 }, { "epoch": 18.07, "grad_norm": 2.730163335800171, "learning_rate": 7.947916666666667e-06, "loss": 0.4449, "step": 1160 }, { "epoch": 18.38, "grad_norm": 1.7422763109207153, "learning_rate": 7.739583333333333e-06, "loss": 0.437, "step": 1180 }, { "epoch": 18.7, "grad_norm": 1.8822110891342163, "learning_rate": 7.531250000000001e-06, "loss": 0.4313, "step": 1200 }, { "epoch": 19.01, "grad_norm": 1.8389759063720703, "learning_rate": 7.322916666666667e-06, "loss": 0.4112, "step": 1220 }, { "epoch": 19.32, "grad_norm": 1.2449133396148682, "learning_rate": 7.114583333333334e-06, "loss": 0.3969, "step": 1240 }, { "epoch": 19.63, "grad_norm": 1.4220046997070312, "learning_rate": 6.906250000000001e-06, "loss": 0.4306, "step": 1260 }, { "epoch": 19.94, "grad_norm": 1.3844712972640991, "learning_rate": 6.697916666666667e-06, "loss": 0.4085, "step": 1280 }, { "epoch": 20.25, "grad_norm": 1.5047410726547241, "learning_rate": 6.489583333333334e-06, "loss": 0.4421, "step": 1300 }, { "epoch": 20.56, "grad_norm": 1.6129776239395142, "learning_rate": 6.281250000000001e-06, "loss": 0.42, "step": 1320 }, { "epoch": 20.88, "grad_norm": 3.006173849105835, "learning_rate": 6.0729166666666675e-06, "loss": 0.3902, "step": 1340 }, { "epoch": 21.19, "grad_norm": 1.5414382219314575, "learning_rate": 5.864583333333334e-06, "loss": 0.3927, "step": 1360 }, { "epoch": 21.5, "grad_norm": 2.1584370136260986, "learning_rate": 5.656250000000001e-06, "loss": 0.4052, "step": 1380 }, { "epoch": 21.81, "grad_norm": 1.9206900596618652, "learning_rate": 5.4479166666666675e-06, "loss": 0.3872, "step": 1400 }, { "epoch": 22.12, "grad_norm": 1.6601423025131226, "learning_rate": 5.239583333333333e-06, "loss": 0.4221, "step": 1420 }, { "epoch": 22.43, "grad_norm": 1.8656572103500366, "learning_rate": 5.031250000000001e-06, "loss": 0.4031, "step": 1440 }, { "epoch": 22.75, "grad_norm": 1.9469555616378784, "learning_rate": 4.822916666666667e-06, "loss": 0.395, "step": 1460 }, { "epoch": 23.06, "grad_norm": 1.801340103149414, "learning_rate": 4.614583333333334e-06, "loss": 0.3584, "step": 1480 }, { "epoch": 23.37, "grad_norm": 2.101327657699585, "learning_rate": 4.40625e-06, "loss": 0.4129, "step": 1500 } ], "logging_steps": 20, "max_steps": 1920, "num_input_tokens_seen": 0, "num_train_epochs": 30, "save_steps": 500, "total_flos": 3.8986916806656e+18, "train_batch_size": 1, "trial_name": null, "trial_params": null }