TinyLlama-1.1B-Chat-rust-cpp-encodings/LORAs/tinyllama-encoder_4e-5/checkpoint-742/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 742,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.999950204782701e-05,
      "loss": 1.7807,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.999800821610369e-05,
      "loss": 1.781,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 3.999551857921571e-05,
      "loss": 1.8258,
      "step": 30
    },
    {
      "epoch": 0.05,
      "learning_rate": 3.999203326113507e-05,
      "loss": 1.5688,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.9987552435413944e-05,
      "loss": 1.5824,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.9982076325176035e-05,
      "loss": 1.7159,
      "step": 60
    },
    {
      "epoch": 0.09,
      "learning_rate": 3.9975605203105434e-05,
      "loss": 1.7095,
      "step": 70
    },
    {
      "epoch": 0.11,
      "learning_rate": 3.996813939143307e-05,
      "loss": 1.6194,
      "step": 80
    },
    {
      "epoch": 0.12,
      "learning_rate": 3.9959679261920665e-05,
      "loss": 1.6602,
      "step": 90
    },
    {
      "epoch": 0.13,
      "learning_rate": 3.995022523584219e-05,
      "loss": 1.6734,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 3.9939777783962946e-05,
      "loss": 1.715,
      "step": 110
    },
    {
      "epoch": 0.16,
      "learning_rate": 3.992833742651606e-05,
      "loss": 1.7322,
      "step": 120
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.9915904733176614e-05,
      "loss": 1.6645,
      "step": 130
    },
    {
      "epoch": 0.19,
      "learning_rate": 3.9902480323033285e-05,
      "loss": 1.6249,
      "step": 140
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.9888064864557486e-05,
      "loss": 1.6279,
      "step": 150
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.987265907557011e-05,
      "loss": 1.5166,
      "step": 160
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.9856263723205755e-05,
      "loss": 1.4946,
      "step": 170
    },
    {
      "epoch": 0.24,
      "learning_rate": 3.983887962387457e-05,
      "loss": 1.5583,
      "step": 180
    },
    {
      "epoch": 0.26,
      "learning_rate": 3.982050764322154e-05,
      "loss": 1.526,
      "step": 190
    },
    {
      "epoch": 0.27,
      "learning_rate": 3.9801148696083455e-05,
      "loss": 1.6003,
      "step": 200
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.9780803746443284e-05,
      "loss": 1.6403,
      "step": 210
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.9759473807382214e-05,
      "loss": 1.5128,
      "step": 220
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.97371599410292e-05,
      "loss": 1.4739,
      "step": 230
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.9713863258508064e-05,
      "loss": 1.5466,
      "step": 240
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.968958491988216e-05,
      "loss": 1.5307,
      "step": 250
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.966432613409667e-05,
      "loss": 1.4508,
      "step": 260
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.9638088158918285e-05,
      "loss": 1.5163,
      "step": 270
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9610872300872704e-05,
      "loss": 1.5583,
      "step": 280
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.958267991517948e-05,
      "loss": 1.3893,
      "step": 290
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.955351240568459e-05,
      "loss": 1.4718,
      "step": 300
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.9523371224790505e-05,
      "loss": 1.479,
      "step": 310
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.949225787338388e-05,
      "loss": 1.4874,
      "step": 320
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.946017390076081e-05,
      "loss": 1.2723,
      "step": 330
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.942712090454968e-05,
      "loss": 1.3925,
      "step": 340
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.939310053063161e-05,
      "loss": 1.3833,
      "step": 350
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.935811447305853e-05,
      "loss": 1.4989,
      "step": 360
    },
    {
      "epoch": 0.5,
      "learning_rate": 3.9322164473968774e-05,
      "loss": 1.4961,
      "step": 370
    },
    {
      "epoch": 0.51,
      "learning_rate": 3.928525232350035e-05,
      "loss": 1.5295,
      "step": 380
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.924737985970182e-05,
      "loss": 1.4051,
      "step": 390
    },
    {
      "epoch": 0.54,
      "learning_rate": 3.920854896844074e-05,
      "loss": 1.3306,
      "step": 400
    },
    {
      "epoch": 0.55,
      "learning_rate": 3.916876158330979e-05,
      "loss": 1.3522,
      "step": 410
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.912801968553045e-05,
      "loss": 1.4479,
      "step": 420
    },
    {
      "epoch": 0.58,
      "learning_rate": 3.908632530385438e-05,
      "loss": 1.5481,
      "step": 430
    },
    {
      "epoch": 0.59,
      "learning_rate": 3.9043680514462366e-05,
      "loss": 1.5375,
      "step": 440
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.900008744086097e-05,
      "loss": 1.321,
      "step": 450
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.895554825377676e-05,
      "loss": 1.4584,
      "step": 460
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.891006517104823e-05,
      "loss": 1.4188,
      "step": 470
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.886364045751538e-05,
      "loss": 1.2557,
      "step": 480
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.881627642490691e-05,
      "loss": 1.412,
      "step": 490
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.876797543172511e-05,
      "loss": 1.3267,
      "step": 500
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.871873988312842e-05,
      "loss": 1.3768,
      "step": 510
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.86685722308117e-05,
      "loss": 1.5065,
      "step": 520
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.861747497288409e-05,
      "loss": 1.382,
      "step": 530
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.856545065374465e-05,
      "loss": 1.3336,
      "step": 540
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.851250186395565e-05,
      "loss": 1.2626,
      "step": 550
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.845863124011361e-05,
      "loss": 1.4174,
      "step": 560
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.840384146471792e-05,
      "loss": 1.3371,
      "step": 570
    },
    {
      "epoch": 0.78,
      "learning_rate": 3.8348135266037364e-05,
      "loss": 1.3496,
      "step": 580
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.829151541797421e-05,
      "loss": 1.2245,
      "step": 590
    },
    {
      "epoch": 0.81,
      "learning_rate": 3.82339847399261e-05,
      "loss": 1.2381,
      "step": 600
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.817554609664564e-05,
      "loss": 1.2805,
      "step": 610
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.811620239809778e-05,
      "loss": 1.2055,
      "step": 620
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.805595659931487e-05,
      "loss": 1.3493,
      "step": 630
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.799481170024957e-05,
      "loss": 1.4545,
      "step": 640
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.7932770745625406e-05,
      "loss": 1.3633,
      "step": 650
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.786983682478519e-05,
      "loss": 1.2677,
      "step": 660
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.78060130715372e-05,
      "loss": 1.2218,
      "step": 670
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.7741302663999085e-05,
      "loss": 1.4738,
      "step": 680
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.7675708824439656e-05,
      "loss": 1.191,
      "step": 690
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.76092348191184e-05,
      "loss": 1.2747,
      "step": 700
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.7541883958122864e-05,
      "loss": 1.2833,
      "step": 710
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.7473659595203806e-05,
      "loss": 1.3725,
      "step": 720
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.74045651276082e-05,
      "loss": 1.31,
      "step": 730
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.7334603995910075e-05,
      "loss": 1.2406,
      "step": 740
    }
  ],
  "logging_steps": 10,
  "max_steps": 4452,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 3877680160604160.0,
  "trial_name": null,
  "trial_params": null
}
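
Note on reading this file: the logged learning_rate values are consistent with a plain cosine decay over max_steps = 4452 with a peak of 4e-5 and no warmup; for example, 0.5 * 4e-5 * (1 + cos(pi * 10 / 4452)) is approximately 3.9999502e-05, the value logged at step 10. Below is a minimal Python sketch that loads this trainer_state.json and reprints the loss log alongside that cosine fit. The relative path and the 4e-5 peak rate (inferred from the run name "tinyllama-encoder_4e-5") are assumptions for illustration, not fields of the file itself.

import json
import math

# Path as shown in the repo tree above (adjust to your local copy).
PATH = "LORAs/tinyllama-encoder_4e-5/checkpoint-742/trainer_state.json"

# Assumed peak learning rate, inferred from the run name "tinyllama-encoder_4e-5".
PEAK_LR = 4e-5

with open(PATH) as f:
    state = json.load(f)

max_steps = state["max_steps"]  # 4452 in this checkpoint

for entry in state["log_history"]:
    step = entry["step"]
    # Cosine decay with no warmup: lr = (peak / 2) * (1 + cos(pi * step / max_steps))
    expected = 0.5 * PEAK_LR * (1 + math.cos(math.pi * step / max_steps))
    print(f"step {step:4d}  loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.6e}  cosine-fit {expected:.6e}")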