{
  "best_metric": 0.9529491987725878,
  "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/canine/canine-base-finetuned-pos-ud-Hindi-HDTB/checkpoint-14000",
  "epoch": 36.05769230769231,
  "global_step": 15000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24,
      "learning_rate": 8e-05,
      "loss": 1.6829,
      "step": 100
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.946308724832215e-05,
      "loss": 0.6655,
      "step": 200
    },
    {
      "epoch": 0.72,
      "learning_rate": 7.89261744966443e-05,
      "loss": 0.4831,
      "step": 300
    },
    {
      "epoch": 0.96,
      "learning_rate": 7.838926174496645e-05,
      "loss": 0.405,
      "step": 400
    },
    {
      "epoch": 1.2,
      "learning_rate": 7.78523489932886e-05,
      "loss": 0.3386,
      "step": 500
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.8978292987839527,
      "eval_loss": 0.3005571663379669,
      "eval_runtime": 6.5562,
      "eval_samples_per_second": 253.044,
      "eval_steps_per_second": 31.726,
      "step": 500
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.731543624161075e-05,
      "loss": 0.3137,
      "step": 600
    },
    {
      "epoch": 1.68,
      "learning_rate": 7.677852348993288e-05,
      "loss": 0.2831,
      "step": 700
    },
    {
      "epoch": 1.92,
      "learning_rate": 7.624161073825503e-05,
      "loss": 0.2711,
      "step": 800
    },
    {
      "epoch": 2.16,
      "learning_rate": 7.570469798657718e-05,
      "loss": 0.2314,
      "step": 900
    },
    {
      "epoch": 2.4,
      "learning_rate": 7.516778523489933e-05,
      "loss": 0.2131,
      "step": 1000
    },
    {
      "epoch": 2.4,
      "eval_accuracy": 0.9229173769746563,
      "eval_loss": 0.25128552317619324,
      "eval_runtime": 6.5615,
      "eval_samples_per_second": 252.839,
      "eval_steps_per_second": 31.7,
      "step": 1000
    },
    {
      "epoch": 2.64,
      "learning_rate": 7.463087248322148e-05,
      "loss": 0.2018,
      "step": 1100
    },
    {
      "epoch": 2.88,
      "learning_rate": 7.409395973154362e-05,
      "loss": 0.2022,
      "step": 1200
    },
    {
      "epoch": 3.12,
      "learning_rate": 7.355704697986577e-05,
      "loss": 0.1719,
      "step": 1300
    },
    {
      "epoch": 3.37,
      "learning_rate": 7.302013422818792e-05,
      "loss": 0.1577,
      "step": 1400
    },
    {
      "epoch": 3.61,
      "learning_rate": 7.248322147651007e-05,
      "loss": 0.1561,
      "step": 1500
    },
    {
      "epoch": 3.61,
      "eval_accuracy": 0.9333446982611661,
      "eval_loss": 0.23343516886234283,
      "eval_runtime": 6.5751,
      "eval_samples_per_second": 252.317,
      "eval_steps_per_second": 31.635,
      "step": 1500
    },
    {
      "epoch": 3.85,
      "learning_rate": 7.194630872483222e-05,
      "loss": 0.1555,
      "step": 1600
    },
    {
      "epoch": 4.09,
      "learning_rate": 7.140939597315438e-05,
      "loss": 0.1439,
      "step": 1700
    },
    {
      "epoch": 4.33,
      "learning_rate": 7.087248322147653e-05,
      "loss": 0.1181,
      "step": 1800
    },
    {
      "epoch": 4.57,
      "learning_rate": 7.033557046979866e-05,
      "loss": 0.1215,
      "step": 1900
    },
    {
      "epoch": 4.81,
      "learning_rate": 6.979865771812081e-05,
      "loss": 0.1219,
      "step": 2000
    },
    {
      "epoch": 4.81,
      "eval_accuracy": 0.9382316172292305,
      "eval_loss": 0.2121623307466507,
      "eval_runtime": 6.5443,
      "eval_samples_per_second": 253.505,
      "eval_steps_per_second": 31.784,
      "step": 2000
    },
    {
      "epoch": 5.05,
      "learning_rate": 6.926174496644296e-05,
      "loss": 0.117,
      "step": 2100
    },
    {
      "epoch": 5.29,
      "learning_rate": 6.87248322147651e-05,
      "loss": 0.0911,
      "step": 2200
    },
    {
      "epoch": 5.53,
      "learning_rate": 6.818791946308725e-05,
      "loss": 0.0925,
      "step": 2300
    },
    {
      "epoch": 5.77,
      "learning_rate": 6.76510067114094e-05,
      "loss": 0.0956,
      "step": 2400
    },
    {
      "epoch": 6.01,
      "learning_rate": 6.711409395973155e-05,
      "loss": 0.0943,
      "step": 2500
    },
    {
      "epoch": 6.01,
      "eval_accuracy": 0.9419252187748608,
      "eval_loss": 0.2105204463005066,
      "eval_runtime": 6.5647,
      "eval_samples_per_second": 252.714,
      "eval_steps_per_second": 31.684,
      "step": 2500
    },
    {
      "epoch": 6.25,
      "learning_rate": 6.65771812080537e-05,
      "loss": 0.069,
      "step": 2600
    },
    {
      "epoch": 6.49,
      "learning_rate": 6.604026845637585e-05,
      "loss": 0.072,
      "step": 2700
    },
    {
      "epoch": 6.73,
      "learning_rate": 6.5503355704698e-05,
      "loss": 0.0766,
      "step": 2800
    },
    {
      "epoch": 6.97,
      "learning_rate": 6.496644295302014e-05,
      "loss": 0.0774,
      "step": 2900
    },
    {
      "epoch": 7.21,
      "learning_rate": 6.442953020134228e-05,
      "loss": 0.0584,
      "step": 3000
    },
    {
      "epoch": 7.21,
      "eval_accuracy": 0.9427207637231504,
      "eval_loss": 0.2496863603591919,
      "eval_runtime": 6.5209,
      "eval_samples_per_second": 254.413,
      "eval_steps_per_second": 31.898,
      "step": 3000
    },
    {
      "epoch": 7.45,
      "learning_rate": 6.389261744966443e-05,
      "loss": 0.0582,
      "step": 3100
    },
    {
      "epoch": 7.69,
      "learning_rate": 6.335570469798657e-05,
      "loss": 0.0611,
      "step": 3200
    },
    {
      "epoch": 7.93,
      "learning_rate": 6.281879194630872e-05,
      "loss": 0.063,
      "step": 3300
    },
    {
      "epoch": 8.17,
      "learning_rate": 6.228187919463087e-05,
      "loss": 0.0509,
      "step": 3400
    },
    {
      "epoch": 8.41,
      "learning_rate": 6.174496644295302e-05,
      "loss": 0.0501,
      "step": 3500
    },
    {
      "epoch": 8.41,
      "eval_accuracy": 0.9460734174337992,
      "eval_loss": 0.24461901187896729,
      "eval_runtime": 6.5594,
      "eval_samples_per_second": 252.919,
      "eval_steps_per_second": 31.71,
      "step": 3500
    },
    {
      "epoch": 8.65,
      "learning_rate": 6.120805369127517e-05,
      "loss": 0.0516,
      "step": 3600
    },
    {
      "epoch": 8.89,
      "learning_rate": 6.067114093959732e-05,
      "loss": 0.0505,
      "step": 3700
    },
    {
      "epoch": 9.13,
      "learning_rate": 6.013422818791947e-05,
      "loss": 0.0431,
      "step": 3800
    },
    {
      "epoch": 9.38,
      "learning_rate": 5.959731543624162e-05,
      "loss": 0.0393,
      "step": 3900
    },
    {
      "epoch": 9.62,
      "learning_rate": 5.906040268456377e-05,
      "loss": 0.0397,
      "step": 4000
    },
    {
      "epoch": 9.62,
      "eval_accuracy": 0.9453062848050915,
      "eval_loss": 0.2671561539173126,
      "eval_runtime": 6.5397,
      "eval_samples_per_second": 253.681,
      "eval_steps_per_second": 31.806,
      "step": 4000
    },
    {
      "epoch": 9.86,
      "learning_rate": 5.8523489932885916e-05,
      "loss": 0.0412,
      "step": 4100
    },
    {
      "epoch": 10.1,
      "learning_rate": 5.798657718120806e-05,
      "loss": 0.0394,
      "step": 4200
    },
    {
      "epoch": 10.34,
      "learning_rate": 5.7449664429530206e-05,
      "loss": 0.032,
      "step": 4300
    },
    {
      "epoch": 10.58,
      "learning_rate": 5.6912751677852354e-05,
      "loss": 0.0333,
      "step": 4400
    },
    {
      "epoch": 10.82,
      "learning_rate": 5.63758389261745e-05,
      "loss": 0.0327,
      "step": 4500
    },
    {
      "epoch": 10.82,
      "eval_accuracy": 0.9442834413001477,
      "eval_loss": 0.2739590108394623,
      "eval_runtime": 6.5794,
      "eval_samples_per_second": 252.151,
      "eval_steps_per_second": 31.614,
      "step": 4500
    },
    {
      "epoch": 11.06,
      "learning_rate": 5.583892617449665e-05,
      "loss": 0.0358,
      "step": 4600
    },
    {
      "epoch": 11.3,
      "learning_rate": 5.53020134228188e-05,
      "loss": 0.0263,
      "step": 4700
    },
    {
      "epoch": 11.54,
      "learning_rate": 5.476510067114095e-05,
      "loss": 0.028,
      "step": 4800
    },
    {
      "epoch": 11.78,
      "learning_rate": 5.4233557046979874e-05,
      "loss": 0.0297,
      "step": 4900
    },
    {
      "epoch": 12.02,
      "learning_rate": 5.3696644295302015e-05,
      "loss": 0.0306,
      "step": 5000
    },
    {
      "epoch": 12.02,
      "eval_accuracy": 0.9459313558358905,
      "eval_loss": 0.2738962173461914,
      "eval_runtime": 6.5965,
      "eval_samples_per_second": 251.499,
      "eval_steps_per_second": 31.532,
      "step": 5000
    },
    {
      "epoch": 12.26,
      "learning_rate": 5.315973154362416e-05,
      "loss": 0.0243,
      "step": 5100
    },
    {
      "epoch": 12.5,
      "learning_rate": 5.262281879194631e-05,
      "loss": 0.0244,
      "step": 5200
    },
    {
      "epoch": 12.74,
      "learning_rate": 5.208590604026846e-05,
      "loss": 0.026,
      "step": 5300
    },
    {
      "epoch": 12.98,
      "learning_rate": 5.154899328859061e-05,
      "loss": 0.0277,
      "step": 5400
    },
    {
      "epoch": 13.22,
      "learning_rate": 5.1012080536912756e-05,
      "loss": 0.0211,
      "step": 5500
    },
    {
      "epoch": 13.22,
      "eval_accuracy": 0.9471814978974884,
      "eval_loss": 0.29070886969566345,
      "eval_runtime": 6.5764,
      "eval_samples_per_second": 252.266,
      "eval_steps_per_second": 31.628,
      "step": 5500
    },
    {
      "epoch": 13.46,
      "learning_rate": 5.0475167785234905e-05,
      "loss": 0.0222,
      "step": 5600
    },
    {
      "epoch": 13.7,
      "learning_rate": 4.993825503355705e-05,
      "loss": 0.0224,
      "step": 5700
    },
    {
      "epoch": 13.94,
      "learning_rate": 4.94013422818792e-05,
      "loss": 0.0222,
      "step": 5800
    },
    {
      "epoch": 14.18,
      "learning_rate": 4.886442953020135e-05,
      "loss": 0.0169,
      "step": 5900
    },
    {
      "epoch": 14.42,
      "learning_rate": 4.83275167785235e-05,
      "loss": 0.0186,
      "step": 6000
    },
    {
      "epoch": 14.42,
      "eval_accuracy": 0.9481759290828503,
      "eval_loss": 0.2863553464412689,
      "eval_runtime": 6.5577,
      "eval_samples_per_second": 252.985,
      "eval_steps_per_second": 31.718,
      "step": 6000
    },
    {
      "epoch": 14.66,
      "learning_rate": 4.779060402684564e-05,
      "loss": 0.0189,
      "step": 6100
    },
    {
      "epoch": 14.9,
      "learning_rate": 4.725369127516779e-05,
      "loss": 0.0185,
      "step": 6200
    },
    {
      "epoch": 15.14,
      "learning_rate": 4.6716778523489936e-05,
      "loss": 0.0189,
      "step": 6300
    },
    {
      "epoch": 15.38,
      "learning_rate": 4.6179865771812084e-05,
      "loss": 0.0148,
      "step": 6400
    },
    {
      "epoch": 15.62,
      "learning_rate": 4.564295302013423e-05,
      "loss": 0.0166,
      "step": 6500
    },
    {
      "epoch": 15.62,
      "eval_accuracy": 0.94672690078418,
      "eval_loss": 0.31074145436286926,
      "eval_runtime": 6.5741,
      "eval_samples_per_second": 252.355,
      "eval_steps_per_second": 31.639,
      "step": 6500
    },
    {
      "epoch": 15.87,
      "learning_rate": 4.510604026845638e-05,
      "loss": 0.017,
      "step": 6600
    },
    {
      "epoch": 16.11,
      "learning_rate": 4.456912751677853e-05,
      "loss": 0.0165,
      "step": 6700
    },
    {
      "epoch": 16.35,
      "learning_rate": 4.403221476510068e-05,
      "loss": 0.0142,
      "step": 6800
    },
    {
      "epoch": 16.59,
      "learning_rate": 4.3495302013422825e-05,
      "loss": 0.0133,
      "step": 6900
    },
    {
      "epoch": 16.83,
      "learning_rate": 4.295838926174497e-05,
      "loss": 0.0149,
      "step": 7000
    },
    {
      "epoch": 16.83,
      "eval_accuracy": 0.948090692124105,
      "eval_loss": 0.30825093388557434,
      "eval_runtime": 6.5568,
      "eval_samples_per_second": 253.02,
      "eval_steps_per_second": 31.723,
      "step": 7000
    },
    {
      "epoch": 17.07,
      "learning_rate": 4.242147651006712e-05,
      "loss": 0.015,
      "step": 7100
    },
    {
      "epoch": 17.31,
      "learning_rate": 4.188456375838927e-05,
      "loss": 0.0124,
      "step": 7200
    },
    {
      "epoch": 17.55,
      "learning_rate": 4.134765100671141e-05,
      "loss": 0.0122,
      "step": 7300
    },
    {
      "epoch": 17.79,
      "learning_rate": 4.081073825503356e-05,
      "loss": 0.0119,
      "step": 7400
    },
    {
      "epoch": 18.03,
      "learning_rate": 4.027382550335571e-05,
      "loss": 0.0138,
      "step": 7500
    },
    {
      "epoch": 18.03,
      "eval_accuracy": 0.9493124218661212,
      "eval_loss": 0.3079260587692261,
      "eval_runtime": 6.5474,
      "eval_samples_per_second": 253.382,
      "eval_steps_per_second": 31.768,
      "step": 7500
    },
    {
      "epoch": 18.27,
      "learning_rate": 3.9736912751677856e-05,
      "loss": 0.0111,
      "step": 7600
    },
    {
      "epoch": 18.51,
      "learning_rate": 3.9200000000000004e-05,
      "loss": 0.0119,
      "step": 7700
    },
    {
      "epoch": 18.75,
      "learning_rate": 3.866308724832215e-05,
      "loss": 0.01,
      "step": 7800
    },
    {
      "epoch": 18.99,
      "learning_rate": 3.81261744966443e-05,
      "loss": 0.0119,
      "step": 7900
    },
    {
      "epoch": 19.23,
      "learning_rate": 3.758926174496645e-05,
      "loss": 0.0107,
      "step": 8000
    },
    {
      "epoch": 19.23,
      "eval_accuracy": 0.9478918058870326,
      "eval_loss": 0.33301591873168945,
      "eval_runtime": 6.5621,
      "eval_samples_per_second": 252.814,
      "eval_steps_per_second": 31.697,
      "step": 8000
    },
    {
      "epoch": 19.47,
      "learning_rate": 3.705234899328859e-05,
      "loss": 0.0103,
      "step": 8100
    },
    {
      "epoch": 19.71,
      "learning_rate": 3.651543624161074e-05,
      "loss": 0.009,
      "step": 8200
    },
    {
      "epoch": 19.95,
      "learning_rate": 3.597852348993289e-05,
      "loss": 0.0109,
      "step": 8300
    },
    {
      "epoch": 20.19,
      "learning_rate": 3.5441610738255035e-05,
      "loss": 0.0091,
      "step": 8400
    },
    {
      "epoch": 20.43,
      "learning_rate": 3.490469798657718e-05,
      "loss": 0.0094,
      "step": 8500
    },
    {
      "epoch": 20.43,
      "eval_accuracy": 0.9502216160927378,
      "eval_loss": 0.3207840025424957,
      "eval_runtime": 6.5583,
      "eval_samples_per_second": 252.961,
      "eval_steps_per_second": 31.715,
      "step": 8500
    },
    {
      "epoch": 20.67,
      "learning_rate": 3.436778523489933e-05,
      "loss": 0.0096,
      "step": 8600
    },
    {
      "epoch": 20.91,
      "learning_rate": 3.383087248322148e-05,
      "loss": 0.0093,
      "step": 8700
    },
    {
      "epoch": 21.15,
      "learning_rate": 3.329395973154363e-05,
      "loss": 0.0079,
      "step": 8800
    },
    {
      "epoch": 21.39,
      "learning_rate": 3.2757046979865776e-05,
      "loss": 0.0083,
      "step": 8900
    },
    {
      "epoch": 21.63,
      "learning_rate": 3.2220134228187925e-05,
      "loss": 0.0079,
      "step": 9000
    },
    {
      "epoch": 21.63,
      "eval_accuracy": 0.9491987725877941,
      "eval_loss": 0.33665016293525696,
      "eval_runtime": 6.5733,
      "eval_samples_per_second": 252.384,
      "eval_steps_per_second": 31.643,
      "step": 9000
    },
    {
      "epoch": 21.88,
      "learning_rate": 3.168322147651007e-05,
      "loss": 0.008,
      "step": 9100
    },
    {
      "epoch": 22.12,
      "learning_rate": 3.114630872483222e-05,
      "loss": 0.0086,
      "step": 9200
    },
    {
      "epoch": 22.36,
      "learning_rate": 3.060939597315436e-05,
      "loss": 0.0071,
      "step": 9300
    },
    {
      "epoch": 22.6,
      "learning_rate": 3.0077852348993293e-05,
      "loss": 0.0076,
      "step": 9400
    },
    {
      "epoch": 22.84,
      "learning_rate": 2.9540939597315438e-05,
      "loss": 0.0074,
      "step": 9500
    },
    {
      "epoch": 22.84,
      "eval_accuracy": 0.9497954312990112,
      "eval_loss": 0.3385924994945526,
      "eval_runtime": 6.533,
      "eval_samples_per_second": 253.942,
      "eval_steps_per_second": 31.838,
      "step": 9500
    },
    {
      "epoch": 23.08,
      "learning_rate": 2.9004026845637586e-05,
      "loss": 0.0076,
      "step": 9600
    },
    {
      "epoch": 23.32,
      "learning_rate": 2.8467114093959734e-05,
      "loss": 0.0059,
      "step": 9700
    },
    {
      "epoch": 23.56,
      "learning_rate": 2.7930201342281882e-05,
      "loss": 0.0061,
      "step": 9800
    },
    {
      "epoch": 23.8,
      "learning_rate": 2.739328859060403e-05,
      "loss": 0.0054,
      "step": 9900
    },
    {
      "epoch": 24.04,
      "learning_rate": 2.685637583892618e-05,
      "loss": 0.0057,
      "step": 10000
    },
    {
      "epoch": 24.04,
      "eval_accuracy": 0.9505909762473008,
      "eval_loss": 0.35995256900787354,
      "eval_runtime": 6.545,
      "eval_samples_per_second": 253.477,
      "eval_steps_per_second": 31.78,
      "step": 10000
    },
    {
      "epoch": 24.28,
      "learning_rate": 2.6319463087248324e-05,
      "loss": 0.0054,
      "step": 10100
    },
    {
      "epoch": 24.52,
      "learning_rate": 2.5782550335570472e-05,
      "loss": 0.0061,
      "step": 10200
    },
    {
      "epoch": 24.76,
      "learning_rate": 2.524563758389262e-05,
      "loss": 0.0057,
      "step": 10300
    },
    {
      "epoch": 25.0,
      "learning_rate": 2.470872483221477e-05,
      "loss": 0.006,
      "step": 10400
    },
    {
      "epoch": 25.24,
      "learning_rate": 2.4171812080536917e-05,
      "loss": 0.0052,
      "step": 10500
    },
    {
      "epoch": 25.24,
      "eval_accuracy": 0.9513865211955904,
      "eval_loss": 0.34160152077674866,
      "eval_runtime": 6.5426,
      "eval_samples_per_second": 253.569,
      "eval_steps_per_second": 31.792,
      "step": 10500
    },
    {
      "epoch": 25.48,
      "learning_rate": 2.3634899328859065e-05,
      "loss": 0.0047,
      "step": 10600
    },
    {
      "epoch": 25.72,
      "learning_rate": 2.3103355704697988e-05,
      "loss": 0.0054,
      "step": 10700
    },
    {
      "epoch": 25.96,
      "learning_rate": 2.2566442953020136e-05,
      "loss": 0.0048,
      "step": 10800
    },
    {
      "epoch": 26.2,
      "learning_rate": 2.202953020134228e-05,
      "loss": 0.0054,
      "step": 10900
    },
    {
      "epoch": 26.44,
      "learning_rate": 2.149261744966443e-05,
      "loss": 0.0038,
      "step": 11000
    },
    {
      "epoch": 26.44,
      "eval_accuracy": 0.950448914649392,
      "eval_loss": 0.35940098762512207,
      "eval_runtime": 6.5479,
      "eval_samples_per_second": 253.364,
      "eval_steps_per_second": 31.766,
      "step": 11000
    },
    {
      "epoch": 26.68,
      "learning_rate": 2.0955704697986578e-05,
      "loss": 0.0041,
      "step": 11100
    },
    {
      "epoch": 26.92,
      "learning_rate": 2.0418791946308726e-05,
      "loss": 0.0037,
      "step": 11200
    },
    {
      "epoch": 27.16,
      "learning_rate": 1.9881879194630874e-05,
      "loss": 0.004,
      "step": 11300
    },
    {
      "epoch": 27.4,
      "learning_rate": 1.9344966442953022e-05,
      "loss": 0.0047,
      "step": 11400
    },
    {
      "epoch": 27.64,
      "learning_rate": 1.880805369127517e-05,
      "loss": 0.0048,
      "step": 11500
    },
    {
      "epoch": 27.64,
      "eval_accuracy": 0.9511023979997727,
      "eval_loss": 0.35828909277915955,
      "eval_runtime": 6.5576,
      "eval_samples_per_second": 252.989,
      "eval_steps_per_second": 31.719,
      "step": 11500
    },
    {
      "epoch": 27.88,
      "learning_rate": 1.8271140939597316e-05,
      "loss": 0.0036,
      "step": 11600
    },
    {
      "epoch": 28.12,
      "learning_rate": 1.7734228187919464e-05,
      "loss": 0.0031,
      "step": 11700
    },
    {
      "epoch": 28.37,
      "learning_rate": 1.7197315436241612e-05,
      "loss": 0.0032,
      "step": 11800
    },
    {
      "epoch": 28.61,
      "learning_rate": 1.666040268456376e-05,
      "loss": 0.0035,
      "step": 11900
    },
    {
      "epoch": 28.85,
      "learning_rate": 1.612348993288591e-05,
      "loss": 0.0033,
      "step": 12000
    },
    {
      "epoch": 28.85,
      "eval_accuracy": 0.9508466871235368,
      "eval_loss": 0.3843555748462677,
      "eval_runtime": 6.5421,
      "eval_samples_per_second": 253.588,
      "eval_steps_per_second": 31.794,
      "step": 12000
    },
    {
      "epoch": 29.09,
      "learning_rate": 1.5586577181208057e-05,
      "loss": 0.0035,
      "step": 12100
    },
    {
      "epoch": 29.33,
      "learning_rate": 1.5049664429530202e-05,
      "loss": 0.0031,
      "step": 12200
    },
    {
      "epoch": 29.57,
      "learning_rate": 1.451275167785235e-05,
      "loss": 0.0029,
      "step": 12300
    },
    {
      "epoch": 29.81,
      "learning_rate": 1.3981208053691275e-05,
      "loss": 0.003,
      "step": 12400
    },
    {
      "epoch": 30.05,
      "learning_rate": 1.3444295302013423e-05,
      "loss": 0.0036,
      "step": 12500
    },
    {
      "epoch": 30.05,
      "eval_accuracy": 0.951187634958518,
      "eval_loss": 0.37571844458580017,
      "eval_runtime": 6.5956,
      "eval_samples_per_second": 251.531,
      "eval_steps_per_second": 31.536,
      "step": 12500
    },
    {
      "epoch": 30.29,
      "learning_rate": 1.2907382550335571e-05,
      "loss": 0.0034,
      "step": 12600
    },
    {
      "epoch": 30.53,
      "learning_rate": 1.2370469798657718e-05,
      "loss": 0.0025,
      "step": 12700
    },
    {
      "epoch": 30.77,
      "learning_rate": 1.1833557046979866e-05,
      "loss": 0.0025,
      "step": 12800
    },
    {
      "epoch": 31.01,
      "learning_rate": 1.1296644295302015e-05,
      "loss": 0.0028,
      "step": 12900
    },
    {
      "epoch": 31.25,
      "learning_rate": 1.0759731543624161e-05,
      "loss": 0.0025,
      "step": 13000
    },
    {
      "epoch": 31.25,
      "eval_accuracy": 0.9515285827934993,
      "eval_loss": 0.38056984543800354,
      "eval_runtime": 6.5762,
      "eval_samples_per_second": 252.275,
      "eval_steps_per_second": 31.629,
      "step": 13000
    },
    {
      "epoch": 31.49,
      "learning_rate": 1.022281879194631e-05,
      "loss": 0.0021,
      "step": 13100
    },
    {
      "epoch": 31.73,
      "learning_rate": 9.685906040268458e-06,
      "loss": 0.0022,
      "step": 13200
    },
    {
      "epoch": 31.97,
      "learning_rate": 9.148993288590606e-06,
      "loss": 0.0026,
      "step": 13300
    },
    {
      "epoch": 32.21,
      "learning_rate": 8.612080536912752e-06,
      "loss": 0.0019,
      "step": 13400
    },
    {
      "epoch": 32.45,
      "learning_rate": 8.0751677852349e-06,
      "loss": 0.0018,
      "step": 13500
    },
    {
      "epoch": 32.45,
      "eval_accuracy": 0.9526082509376066,
      "eval_loss": 0.3810262680053711,
      "eval_runtime": 6.5701,
      "eval_samples_per_second": 252.508,
      "eval_steps_per_second": 31.659,
      "step": 13500
    },
    {
      "epoch": 32.69,
      "learning_rate": 7.538255033557048e-06,
      "loss": 0.0021,
      "step": 13600
    },
    {
      "epoch": 32.93,
      "learning_rate": 7.001342281879195e-06,
      "loss": 0.0022,
      "step": 13700
    },
    {
      "epoch": 33.17,
      "learning_rate": 6.464429530201343e-06,
      "loss": 0.0017,
      "step": 13800
    },
    {
      "epoch": 33.41,
      "learning_rate": 5.92751677852349e-06,
      "loss": 0.0022,
      "step": 13900
    },
    {
      "epoch": 33.65,
      "learning_rate": 5.390604026845638e-06,
      "loss": 0.0016,
      "step": 14000
    },
    {
      "epoch": 33.65,
      "eval_accuracy": 0.9529491987725878,
      "eval_loss": 0.37525418400764465,
      "eval_runtime": 6.5513,
      "eval_samples_per_second": 253.233,
      "eval_steps_per_second": 31.75,
      "step": 14000
    },
    {
      "epoch": 33.89,
      "learning_rate": 4.853691275167786e-06,
      "loss": 0.0017,
      "step": 14100
    },
    {
      "epoch": 34.13,
      "learning_rate": 4.316778523489933e-06,
      "loss": 0.0018,
      "step": 14200
    },
    {
      "epoch": 34.38,
      "learning_rate": 3.7798657718120806e-06,
      "loss": 0.0016,
      "step": 14300
    },
    {
      "epoch": 34.62,
      "learning_rate": 3.2429530201342284e-06,
      "loss": 0.0019,
      "step": 14400
    },
    {
      "epoch": 34.86,
      "learning_rate": 2.706040268456376e-06,
      "loss": 0.0014,
      "step": 14500
    },
    {
      "epoch": 34.86,
      "eval_accuracy": 0.95266507557677,
      "eval_loss": 0.37661275267601013,
      "eval_runtime": 6.5633,
      "eval_samples_per_second": 252.77,
      "eval_steps_per_second": 31.692,
      "step": 14500
    },
    {
      "epoch": 35.1,
      "learning_rate": 2.1691275167785236e-06,
      "loss": 0.0017,
      "step": 14600
    },
    {
      "epoch": 35.34,
      "learning_rate": 1.6322147651006712e-06,
      "loss": 0.0015,
      "step": 14700
    },
    {
      "epoch": 35.58,
      "learning_rate": 1.0953020134228189e-06,
      "loss": 0.0017,
      "step": 14800
    },
    {
      "epoch": 35.82,
      "learning_rate": 5.583892617449665e-07,
      "loss": 0.0013,
      "step": 14900
    },
    {
      "epoch": 36.06,
      "learning_rate": 2.1476510067114093e-08,
      "loss": 0.0011,
      "step": 15000
    },
    {
      "epoch": 36.06,
      "eval_accuracy": 0.9524661893396977,
      "eval_loss": 0.3779400885105133,
      "eval_runtime": 6.5493,
      "eval_samples_per_second": 253.31,
      "eval_steps_per_second": 31.759,
      "step": 15000
    },
    {
      "epoch": 36.06,
      "step": 15000,
      "total_flos": 7.878113593860096e+16,
      "train_loss": 0.05964354458153248,
      "train_runtime": 2290.905,
      "train_samples_per_second": 209.524,
      "train_steps_per_second": 6.548
    }
  ],
  "max_steps": 15000,
  "num_train_epochs": 37,
  "total_flos": 7.878113593860096e+16,
  "trial_name": null,
  "trial_params": null
}