{
  "best_metric": 0.034271180629730225,
  "best_model_checkpoint": "saves/psy-course/Llama3-OpenBioLLM-8B/train/fold7/checkpoint-1950",
  "epoch": 4.9961802902979375,
  "eval_steps": 50,
  "global_step": 3270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015278838808250574,
      "grad_norm": 5.722738265991211,
      "learning_rate": 3.0581039755351682e-06,
      "loss": 1.3515,
      "step": 10
    },
    {
      "epoch": 0.030557677616501147,
      "grad_norm": 5.476120471954346,
      "learning_rate": 6.1162079510703365e-06,
      "loss": 1.2833,
      "step": 20
    },
    {
      "epoch": 0.04583651642475172,
      "grad_norm": 3.9035332202911377,
      "learning_rate": 9.174311926605506e-06,
      "loss": 1.0237,
      "step": 30
    },
    {
      "epoch": 0.061115355233002294,
      "grad_norm": 2.4191477298736572,
      "learning_rate": 1.2232415902140673e-05,
      "loss": 0.8029,
      "step": 40
    },
    {
      "epoch": 0.07639419404125286,
      "grad_norm": 2.650960683822632,
      "learning_rate": 1.5290519877675842e-05,
      "loss": 0.5143,
      "step": 50
    },
    {
      "epoch": 0.07639419404125286,
      "eval_loss": 0.30689939856529236,
      "eval_runtime": 192.1143,
      "eval_samples_per_second": 6.059,
      "eval_steps_per_second": 6.059,
      "step": 50
    },
    {
      "epoch": 0.09167303284950344,
      "grad_norm": 1.2615604400634766,
      "learning_rate": 1.834862385321101e-05,
      "loss": 0.2999,
      "step": 60
    },
    {
      "epoch": 0.10695187165775401,
      "grad_norm": 1.4181287288665771,
      "learning_rate": 2.140672782874618e-05,
      "loss": 0.2094,
      "step": 70
    },
    {
      "epoch": 0.12223071046600459,
      "grad_norm": 1.572482705116272,
      "learning_rate": 2.4464831804281346e-05,
      "loss": 0.1623,
      "step": 80
    },
    {
      "epoch": 0.13750954927425516,
      "grad_norm": 1.0281938314437866,
      "learning_rate": 2.7522935779816515e-05,
      "loss": 0.1138,
      "step": 90
    },
    {
      "epoch": 0.15278838808250572,
      "grad_norm": 1.8321815729141235,
      "learning_rate": 3.0581039755351684e-05,
      "loss": 0.1115,
      "step": 100
    },
    {
      "epoch": 0.15278838808250572,
      "eval_loss": 0.08002135157585144,
      "eval_runtime": 191.5562,
      "eval_samples_per_second": 6.077,
      "eval_steps_per_second": 6.077,
      "step": 100
    },
    {
      "epoch": 0.16806722689075632,
      "grad_norm": 1.0883564949035645,
      "learning_rate": 3.363914373088685e-05,
      "loss": 0.0747,
      "step": 110
    },
    {
      "epoch": 0.18334606569900688,
      "grad_norm": 1.412797212600708,
      "learning_rate": 3.669724770642202e-05,
      "loss": 0.0824,
      "step": 120
    },
    {
      "epoch": 0.19862490450725745,
      "grad_norm": 1.3624087572097778,
      "learning_rate": 3.9755351681957185e-05,
      "loss": 0.0737,
      "step": 130
    },
    {
      "epoch": 0.21390374331550802,
      "grad_norm": 1.5038727521896362,
      "learning_rate": 4.281345565749236e-05,
      "loss": 0.0822,
      "step": 140
    },
    {
      "epoch": 0.22918258212375858,
      "grad_norm": 1.0052672624588013,
      "learning_rate": 4.587155963302753e-05,
      "loss": 0.0781,
      "step": 150
    },
    {
      "epoch": 0.22918258212375858,
      "eval_loss": 0.06360089778900146,
      "eval_runtime": 192.0197,
      "eval_samples_per_second": 6.062,
      "eval_steps_per_second": 6.062,
      "step": 150
    },
    {
      "epoch": 0.24446142093200918,
      "grad_norm": 1.2365893125534058,
      "learning_rate": 4.892966360856269e-05,
      "loss": 0.0754,
      "step": 160
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 0.8989318609237671,
      "learning_rate": 5.1987767584097854e-05,
      "loss": 0.0666,
      "step": 170
    },
    {
      "epoch": 0.2750190985485103,
      "grad_norm": 0.5810503959655762,
      "learning_rate": 5.504587155963303e-05,
      "loss": 0.0705,
      "step": 180
    },
    {
      "epoch": 0.2902979373567609,
      "grad_norm": 1.2738935947418213,
      "learning_rate": 5.81039755351682e-05,
      "loss": 0.0741,
      "step": 190
    },
    {
      "epoch": 0.30557677616501144,
      "grad_norm": 0.8120205402374268,
      "learning_rate": 6.116207951070337e-05,
      "loss": 0.0677,
      "step": 200
    },
    {
      "epoch": 0.30557677616501144,
      "eval_loss": 0.059481341391801834,
      "eval_runtime": 190.8939,
      "eval_samples_per_second": 6.098,
      "eval_steps_per_second": 6.098,
      "step": 200
    },
    {
      "epoch": 0.32085561497326204,
      "grad_norm": 0.9474322199821472,
      "learning_rate": 6.422018348623854e-05,
      "loss": 0.0619,
      "step": 210
    },
    {
      "epoch": 0.33613445378151263,
      "grad_norm": 0.926907479763031,
      "learning_rate": 6.72782874617737e-05,
      "loss": 0.0806,
      "step": 220
    },
    {
      "epoch": 0.35141329258976317,
      "grad_norm": 0.8283106684684753,
      "learning_rate": 7.033639143730886e-05,
      "loss": 0.0677,
      "step": 230
    },
    {
      "epoch": 0.36669213139801377,
      "grad_norm": 0.6710531115531921,
      "learning_rate": 7.339449541284404e-05,
      "loss": 0.0606,
      "step": 240
    },
    {
      "epoch": 0.3819709702062643,
      "grad_norm": 0.729621946811676,
      "learning_rate": 7.645259938837921e-05,
      "loss": 0.0609,
      "step": 250
    },
    {
      "epoch": 0.3819709702062643,
      "eval_loss": 0.04995494335889816,
      "eval_runtime": 190.9738,
      "eval_samples_per_second": 6.095,
      "eval_steps_per_second": 6.095,
      "step": 250
    },
    {
      "epoch": 0.3972498090145149,
      "grad_norm": 0.7670791149139404,
      "learning_rate": 7.951070336391437e-05,
      "loss": 0.0462,
      "step": 260
    },
    {
      "epoch": 0.4125286478227655,
      "grad_norm": 1.007689118385315,
      "learning_rate": 8.256880733944955e-05,
      "loss": 0.0799,
      "step": 270
    },
    {
      "epoch": 0.42780748663101603,
      "grad_norm": 0.5677183270454407,
      "learning_rate": 8.562691131498472e-05,
      "loss": 0.0758,
      "step": 280
    },
    {
      "epoch": 0.4430863254392666,
      "grad_norm": 0.5293294787406921,
      "learning_rate": 8.868501529051988e-05,
      "loss": 0.0554,
      "step": 290
    },
    {
      "epoch": 0.45836516424751717,
      "grad_norm": 0.7656412720680237,
      "learning_rate": 9.174311926605506e-05,
      "loss": 0.0442,
      "step": 300
    },
    {
      "epoch": 0.45836516424751717,
      "eval_loss": 0.051944799721241,
      "eval_runtime": 191.224,
      "eval_samples_per_second": 6.087,
      "eval_steps_per_second": 6.087,
      "step": 300
    },
    {
      "epoch": 0.47364400305576776,
      "grad_norm": 0.5527473092079163,
      "learning_rate": 9.480122324159021e-05,
      "loss": 0.0433,
      "step": 310
    },
    {
      "epoch": 0.48892284186401835,
      "grad_norm": 0.3190058469772339,
      "learning_rate": 9.785932721712538e-05,
      "loss": 0.0546,
      "step": 320
    },
    {
      "epoch": 0.5042016806722689,
      "grad_norm": 0.486265629529953,
      "learning_rate": 9.999974360983129e-05,
      "loss": 0.0597,
      "step": 330
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.5260328650474548,
      "learning_rate": 9.999518563553522e-05,
      "loss": 0.0626,
      "step": 340
    },
    {
      "epoch": 0.5347593582887701,
      "grad_norm": 0.6759735941886902,
      "learning_rate": 9.998493069976636e-05,
      "loss": 0.0605,
      "step": 350
    },
    {
      "epoch": 0.5347593582887701,
      "eval_loss": 0.052965257316827774,
      "eval_runtime": 192.2041,
      "eval_samples_per_second": 6.056,
      "eval_steps_per_second": 6.056,
      "step": 350
    },
    {
      "epoch": 0.5500381970970206,
      "grad_norm": 0.8925366401672363,
      "learning_rate": 9.99689799710767e-05,
      "loss": 0.0647,
      "step": 360
    },
    {
      "epoch": 0.5653170359052712,
      "grad_norm": 0.5414925217628479,
      "learning_rate": 9.994733526705501e-05,
      "loss": 0.0662,
      "step": 370
    },
    {
      "epoch": 0.5805958747135218,
      "grad_norm": 0.5703091621398926,
      "learning_rate": 9.991999905411966e-05,
      "loss": 0.044,
      "step": 380
    },
    {
      "epoch": 0.5958747135217723,
      "grad_norm": 0.2930344045162201,
      "learning_rate": 9.988697444723762e-05,
      "loss": 0.0395,
      "step": 390
    },
    {
      "epoch": 0.6111535523300229,
      "grad_norm": 0.26469382643699646,
      "learning_rate": 9.984826520956949e-05,
      "loss": 0.0415,
      "step": 400
    },
    {
      "epoch": 0.6111535523300229,
      "eval_loss": 0.042674873024225235,
      "eval_runtime": 191.5669,
      "eval_samples_per_second": 6.076,
      "eval_steps_per_second": 6.076,
      "step": 400
    },
    {
      "epoch": 0.6264323911382735,
      "grad_norm": 0.45453405380249023,
      "learning_rate": 9.980387575204072e-05,
      "loss": 0.0442,
      "step": 410
    },
    {
      "epoch": 0.6417112299465241,
      "grad_norm": 0.5698592662811279,
      "learning_rate": 9.975381113283891e-05,
      "loss": 0.035,
      "step": 420
    },
    {
      "epoch": 0.6569900687547746,
      "grad_norm": 0.5528980493545532,
      "learning_rate": 9.969807705683751e-05,
      "loss": 0.0449,
      "step": 430
    },
    {
      "epoch": 0.6722689075630253,
      "grad_norm": 0.8066198229789734,
      "learning_rate": 9.96366798749457e-05,
      "loss": 0.03,
      "step": 440
    },
    {
      "epoch": 0.6875477463712758,
      "grad_norm": 0.3302541971206665,
      "learning_rate": 9.956962658338473e-05,
      "loss": 0.0616,
      "step": 450
    },
    {
      "epoch": 0.6875477463712758,
      "eval_loss": 0.04298438876867294,
      "eval_runtime": 190.871,
      "eval_samples_per_second": 6.098,
      "eval_steps_per_second": 6.098,
      "step": 450
    },
    {
      "epoch": 0.7028265851795263,
      "grad_norm": 0.3077934980392456,
      "learning_rate": 9.94969248228907e-05,
      "loss": 0.0393,
      "step": 460
    },
    {
      "epoch": 0.7181054239877769,
      "grad_norm": 0.46446335315704346,
      "learning_rate": 9.941858287784383e-05,
      "loss": 0.0514,
      "step": 470
    },
    {
      "epoch": 0.7333842627960275,
      "grad_norm": 0.5021407604217529,
      "learning_rate": 9.933460967532453e-05,
      "loss": 0.0513,
      "step": 480
    },
    {
      "epoch": 0.7486631016042781,
      "grad_norm": 0.5457306504249573,
      "learning_rate": 9.924501478409618e-05,
      "loss": 0.0527,
      "step": 490
    },
    {
      "epoch": 0.7639419404125286,
      "grad_norm": 0.38309529423713684,
      "learning_rate": 9.914980841351465e-05,
      "loss": 0.0412,
      "step": 500
    },
    {
      "epoch": 0.7639419404125286,
      "eval_loss": 0.03890955075621605,
      "eval_runtime": 191.337,
      "eval_samples_per_second": 6.084,
      "eval_steps_per_second": 6.084,
      "step": 500
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.4321908950805664,
      "learning_rate": 9.904900141236506e-05,
      "loss": 0.0329,
      "step": 510
    },
    {
      "epoch": 0.7944996180290298,
      "grad_norm": 0.5725101232528687,
      "learning_rate": 9.894260526762548e-05,
      "loss": 0.0507,
      "step": 520
    },
    {
      "epoch": 0.8097784568372803,
      "grad_norm": 0.7723375558853149,
      "learning_rate": 9.883063210315804e-05,
      "loss": 0.0627,
      "step": 530
    },
    {
      "epoch": 0.825057295645531,
      "grad_norm": 0.49958401918411255,
      "learning_rate": 9.871309467832738e-05,
      "loss": 0.0471,
      "step": 540
    },
    {
      "epoch": 0.8403361344537815,
      "grad_norm": 0.32487210631370544,
      "learning_rate": 9.859000638654674e-05,
      "loss": 0.0306,
      "step": 550
    },
    {
      "epoch": 0.8403361344537815,
      "eval_loss": 0.03846091032028198,
      "eval_runtime": 191.5057,
      "eval_samples_per_second": 6.078,
      "eval_steps_per_second": 6.078,
      "step": 550
    },
    {
      "epoch": 0.8556149732620321,
      "grad_norm": 0.5225566029548645,
      "learning_rate": 9.846138125375175e-05,
      "loss": 0.0523,
      "step": 560
    },
    {
      "epoch": 0.8708938120702827,
      "grad_norm": 0.3816261291503906,
      "learning_rate": 9.83272339368022e-05,
      "loss": 0.0352,
      "step": 570
    },
    {
      "epoch": 0.8861726508785333,
      "grad_norm": 0.3570096790790558,
      "learning_rate": 9.818757972181191e-05,
      "loss": 0.0547,
      "step": 580
    },
    {
      "epoch": 0.9014514896867838,
      "grad_norm": 0.6031997799873352,
      "learning_rate": 9.804243452240675e-05,
      "loss": 0.0377,
      "step": 590
    },
    {
      "epoch": 0.9167303284950343,
      "grad_norm": 0.955195963382721,
      "learning_rate": 9.789181487791146e-05,
      "loss": 0.0579,
      "step": 600
    },
    {
      "epoch": 0.9167303284950343,
      "eval_loss": 0.03784943372011185,
      "eval_runtime": 191.7708,
      "eval_samples_per_second": 6.07,
      "eval_steps_per_second": 6.07,
      "step": 600
    },
    {
      "epoch": 0.932009167303285,
      "grad_norm": 0.5558242201805115,
      "learning_rate": 9.773573795146485e-05,
      "loss": 0.0581,
      "step": 610
    },
    {
      "epoch": 0.9472880061115355,
      "grad_norm": 0.3050296902656555,
      "learning_rate": 9.757422152806415e-05,
      "loss": 0.0342,
      "step": 620
    },
    {
      "epoch": 0.9625668449197861,
      "grad_norm": 0.41481053829193115,
      "learning_rate": 9.74072840125383e-05,
      "loss": 0.0506,
      "step": 630
    },
    {
      "epoch": 0.9778456837280367,
      "grad_norm": 0.6876723170280457,
      "learning_rate": 9.723494442745085e-05,
      "loss": 0.0474,
      "step": 640
    },
    {
      "epoch": 0.9931245225362872,
      "grad_norm": 0.638076663017273,
      "learning_rate": 9.705722241093223e-05,
      "loss": 0.0625,
      "step": 650
    },
    {
      "epoch": 0.9931245225362872,
      "eval_loss": 0.042443711310625076,
      "eval_runtime": 191.6589,
      "eval_samples_per_second": 6.073,
      "eval_steps_per_second": 6.073,
      "step": 650
    },
    {
      "epoch": 1.0084033613445378,
      "grad_norm": 0.3265399634838104,
      "learning_rate": 9.687413821444199e-05,
      "loss": 0.0475,
      "step": 660
    },
    {
      "epoch": 1.0236822001527883,
      "grad_norm": 0.2271881401538849,
      "learning_rate": 9.668571270046122e-05,
      "loss": 0.04,
      "step": 670
    },
    {
      "epoch": 1.0389610389610389,
      "grad_norm": 0.20923583209514618,
      "learning_rate": 9.649196734011519e-05,
      "loss": 0.0353,
      "step": 680
    },
    {
      "epoch": 1.0542398777692896,
      "grad_norm": 0.9111831784248352,
      "learning_rate": 9.629292421072671e-05,
      "loss": 0.029,
      "step": 690
    },
    {
      "epoch": 1.0695187165775402,
      "grad_norm": 0.21794196963310242,
      "learning_rate": 9.608860599330048e-05,
      "loss": 0.0363,
      "step": 700
    },
    {
      "epoch": 1.0695187165775402,
      "eval_loss": 0.0368080735206604,
      "eval_runtime": 190.2458,
      "eval_samples_per_second": 6.118,
      "eval_steps_per_second": 6.118,
      "step": 700
    },
    {
      "epoch": 1.0847975553857907,
      "grad_norm": 0.1954701691865921,
      "learning_rate": 9.587903596993854e-05,
      "loss": 0.0337,
      "step": 710
    },
    {
      "epoch": 1.1000763941940412,
      "grad_norm": 0.3832463324069977,
      "learning_rate": 9.566423802118724e-05,
      "loss": 0.036,
      "step": 720
    },
    {
      "epoch": 1.1153552330022918,
      "grad_norm": 0.120274618268013,
      "learning_rate": 9.544423662331612e-05,
      "loss": 0.048,
      "step": 730
    },
    {
      "epoch": 1.1306340718105423,
      "grad_norm": 0.8174816966056824,
      "learning_rate": 9.521905684552877e-05,
      "loss": 0.0354,
      "step": 740
    },
    {
      "epoch": 1.1459129106187929,
      "grad_norm": 0.31460389494895935,
      "learning_rate": 9.498872434710623e-05,
      "loss": 0.024,
      "step": 750
    },
    {
      "epoch": 1.1459129106187929,
      "eval_loss": 0.037662845104932785,
      "eval_runtime": 188.5908,
      "eval_samples_per_second": 6.172,
      "eval_steps_per_second": 6.172,
      "step": 750
    },
    {
      "epoch": 1.1611917494270436,
      "grad_norm": 0.26857221126556396,
      "learning_rate": 9.475326537448307e-05,
      "loss": 0.0303,
      "step": 760
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.2545701861381531,
      "learning_rate": 9.451270675825665e-05,
      "loss": 0.0275,
      "step": 770
    },
    {
      "epoch": 1.1917494270435447,
      "grad_norm": 0.41096410155296326,
      "learning_rate": 9.426707591012976e-05,
      "loss": 0.0333,
      "step": 780
    },
    {
      "epoch": 1.2070282658517952,
      "grad_norm": 0.383953332901001,
      "learning_rate": 9.4016400819787e-05,
      "loss": 0.0413,
      "step": 790
    },
    {
      "epoch": 1.2223071046600458,
      "grad_norm": 0.6644073724746704,
      "learning_rate": 9.376071005170539e-05,
      "loss": 0.0331,
      "step": 800
    },
    {
      "epoch": 1.2223071046600458,
      "eval_loss": 0.037403445690870285,
      "eval_runtime": 187.4305,
      "eval_samples_per_second": 6.21,
      "eval_steps_per_second": 6.21,
      "step": 800
    },
    {
      "epoch": 1.2375859434682965,
      "grad_norm": 0.2898555099964142,
      "learning_rate": 9.350003274189949e-05,
      "loss": 0.0371,
      "step": 810
    },
    {
      "epoch": 1.2528647822765469,
      "grad_norm": 0.6009165644645691,
      "learning_rate": 9.323439859460122e-05,
      "loss": 0.0376,
      "step": 820
    },
    {
      "epoch": 1.2681436210847976,
      "grad_norm": 0.3177230954170227,
      "learning_rate": 9.296383787887519e-05,
      "loss": 0.0342,
      "step": 830
    },
    {
      "epoch": 1.2834224598930482,
      "grad_norm": 0.3252789080142975,
      "learning_rate": 9.268838142516943e-05,
      "loss": 0.0446,
      "step": 840
    },
    {
      "epoch": 1.2987012987012987,
      "grad_norm": 0.22259239852428436,
      "learning_rate": 9.240806062180234e-05,
      "loss": 0.0368,
      "step": 850
    },
    {
      "epoch": 1.2987012987012987,
      "eval_loss": 0.03829605504870415,
      "eval_runtime": 187.1071,
      "eval_samples_per_second": 6.221,
      "eval_steps_per_second": 6.221,
      "step": 850
    },
    {
      "epoch": 1.3139801375095492,
      "grad_norm": 0.17059855163097382,
      "learning_rate": 9.212290741138592e-05,
      "loss": 0.0184,
      "step": 860
    },
    {
      "epoch": 1.3292589763177998,
      "grad_norm": 0.18967171013355255,
      "learning_rate": 9.183295428718592e-05,
      "loss": 0.0318,
      "step": 870
    },
    {
      "epoch": 1.3445378151260505,
      "grad_norm": 0.3216451406478882,
      "learning_rate": 9.153823428941924e-05,
      "loss": 0.0403,
      "step": 880
    },
    {
      "epoch": 1.359816653934301,
      "grad_norm": 0.32410064339637756,
      "learning_rate": 9.1238781001489e-05,
      "loss": 0.0311,
      "step": 890
    },
    {
      "epoch": 1.3750954927425516,
      "grad_norm": 0.2728131115436554,
      "learning_rate": 9.093462854615766e-05,
      "loss": 0.039,
      "step": 900
    },
    {
      "epoch": 1.3750954927425516,
      "eval_loss": 0.03588823974132538,
      "eval_runtime": 186.8024,
      "eval_samples_per_second": 6.231,
      "eval_steps_per_second": 6.231,
      "step": 900
    },
    {
      "epoch": 1.3903743315508021,
      "grad_norm": 0.28432366251945496,
      "learning_rate": 9.062581158165876e-05,
      "loss": 0.0353,
      "step": 910
    },
    {
      "epoch": 1.4056531703590527,
      "grad_norm": 0.3692541718482971,
      "learning_rate": 9.031236529774764e-05,
      "loss": 0.0277,
      "step": 920
    },
    {
      "epoch": 1.4209320091673032,
      "grad_norm": 0.20645208656787872,
      "learning_rate": 8.999432541169145e-05,
      "loss": 0.035,
      "step": 930
    },
    {
      "epoch": 1.4362108479755538,
      "grad_norm": 0.28323686122894287,
      "learning_rate": 8.967172816419927e-05,
      "loss": 0.0319,
      "step": 940
    },
    {
      "epoch": 1.4514896867838045,
      "grad_norm": 0.3196757435798645,
      "learning_rate": 8.934461031529242e-05,
      "loss": 0.0428,
      "step": 950
    },
    {
      "epoch": 1.4514896867838045,
      "eval_loss": 0.03870726749300957,
      "eval_runtime": 188.2883,
      "eval_samples_per_second": 6.182,
      "eval_steps_per_second": 6.182,
      "step": 950
    },
    {
      "epoch": 1.466768525592055,
      "grad_norm": 0.42527279257774353,
      "learning_rate": 8.901300914011569e-05,
      "loss": 0.0339,
      "step": 960
    },
    {
      "epoch": 1.4820473644003056,
      "grad_norm": 0.12177124619483948,
      "learning_rate": 8.867696242468976e-05,
      "loss": 0.0247,
      "step": 970
    },
    {
      "epoch": 1.4973262032085561,
      "grad_norm": 0.33116525411605835,
      "learning_rate": 8.833650846160555e-05,
      "loss": 0.0413,
      "step": 980
    },
    {
      "epoch": 1.5126050420168067,
      "grad_norm": 0.33314356207847595,
      "learning_rate": 8.79916860456607e-05,
      "loss": 0.0292,
      "step": 990
    },
    {
      "epoch": 1.5278838808250574,
      "grad_norm": 0.23561441898345947,
      "learning_rate": 8.7642534469439e-05,
      "loss": 0.0285,
      "step": 1000
    },
    {
      "epoch": 1.5278838808250574,
      "eval_loss": 0.03498704358935356,
      "eval_runtime": 189.0743,
      "eval_samples_per_second": 6.156,
      "eval_steps_per_second": 6.156,
      "step": 1000
    },
    {
      "epoch": 1.5431627196333078,
      "grad_norm": 0.6858423352241516,
      "learning_rate": 8.728909351883283e-05,
      "loss": 0.0279,
      "step": 1010
    },
    {
      "epoch": 1.5584415584415585,
      "grad_norm": 0.43968966603279114,
      "learning_rate": 8.693140346850975e-05,
      "loss": 0.0401,
      "step": 1020
    },
    {
      "epoch": 1.573720397249809,
      "grad_norm": 0.24436284601688385,
      "learning_rate": 8.656950507732303e-05,
      "loss": 0.0275,
      "step": 1030
    },
    {
      "epoch": 1.5889992360580596,
      "grad_norm": 0.15837892889976501,
      "learning_rate": 8.620343958366718e-05,
      "loss": 0.0358,
      "step": 1040
    },
    {
      "epoch": 1.6042780748663101,
      "grad_norm": 0.40071073174476624,
      "learning_rate": 8.5833248700779e-05,
      "loss": 0.0292,
      "step": 1050
    },
    {
      "epoch": 1.6042780748663101,
      "eval_loss": 0.0367753803730011,
      "eval_runtime": 189.1042,
      "eval_samples_per_second": 6.155,
      "eval_steps_per_second": 6.155,
      "step": 1050
    },
    {
      "epoch": 1.6195569136745607,
      "grad_norm": 0.12202508002519608,
      "learning_rate": 8.545897461198413e-05,
      "loss": 0.0326,
      "step": 1060
    },
    {
      "epoch": 1.6348357524828114,
      "grad_norm": 0.18900205194950104,
      "learning_rate": 8.508065996589036e-05,
      "loss": 0.0285,
      "step": 1070
    },
    {
      "epoch": 1.6501145912910617,
      "grad_norm": 0.15240703523159027,
      "learning_rate": 8.469834787152783e-05,
      "loss": 0.0352,
      "step": 1080
    },
    {
      "epoch": 1.6653934300993125,
      "grad_norm": 0.28794217109680176,
      "learning_rate": 8.43120818934367e-05,
      "loss": 0.0306,
      "step": 1090
    },
    {
      "epoch": 1.680672268907563,
      "grad_norm": 0.4002177119255066,
      "learning_rate": 8.392190604670293e-05,
      "loss": 0.0317,
      "step": 1100
    },
    {
      "epoch": 1.680672268907563,
      "eval_loss": 0.036450888961553574,
      "eval_runtime": 189.1055,
      "eval_samples_per_second": 6.155,
      "eval_steps_per_second": 6.155,
      "step": 1100
    },
    {
      "epoch": 1.6959511077158136,
      "grad_norm": 0.2935492992401123,
      "learning_rate": 8.352786479194288e-05,
      "loss": 0.0383,
      "step": 1110
    },
    {
      "epoch": 1.7112299465240641,
      "grad_norm": 0.3728632926940918,
      "learning_rate": 8.313000303023688e-05,
      "loss": 0.0287,
      "step": 1120
    },
    {
      "epoch": 1.7265087853323147,
      "grad_norm": 0.2868274450302124,
      "learning_rate": 8.27283660980128e-05,
      "loss": 0.0369,
      "step": 1130
    },
    {
      "epoch": 1.7417876241405654,
      "grad_norm": 0.210665762424469,
      "learning_rate": 8.232299976187999e-05,
      "loss": 0.0361,
      "step": 1140
    },
    {
      "epoch": 1.7570664629488157,
      "grad_norm": 0.2601427733898163,
      "learning_rate": 8.191395021341408e-05,
      "loss": 0.0506,
      "step": 1150
    },
    {
      "epoch": 1.7570664629488157,
      "eval_loss": 0.03487667441368103,
      "eval_runtime": 188.1603,
      "eval_samples_per_second": 6.186,
      "eval_steps_per_second": 6.186,
      "step": 1150
    },
    {
      "epoch": 1.7723453017570665,
      "grad_norm": 0.34721285104751587,
      "learning_rate": 8.150126406389352e-05,
      "loss": 0.0303,
      "step": 1160
    },
    {
      "epoch": 1.787624140565317,
      "grad_norm": 0.3729540705680847,
      "learning_rate": 8.108498833898815e-05,
      "loss": 0.038,
      "step": 1170
    },
    {
      "epoch": 1.8029029793735676,
      "grad_norm": 0.3638748526573181,
      "learning_rate": 8.066517047340066e-05,
      "loss": 0.032,
      "step": 1180
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.14144481718540192,
      "learning_rate": 8.02418583054614e-05,
      "loss": 0.0198,
      "step": 1190
    },
    {
      "epoch": 1.8334606569900687,
      "grad_norm": 0.40133315324783325,
      "learning_rate": 7.981510007167719e-05,
      "loss": 0.0329,
      "step": 1200
    },
    {
      "epoch": 1.8334606569900687,
      "eval_loss": 0.035291481763124466,
      "eval_runtime": 187.8454,
      "eval_samples_per_second": 6.197,
      "eval_steps_per_second": 6.197,
      "step": 1200
    },
    {
      "epoch": 1.8487394957983194,
      "grad_norm": 0.3257274329662323,
      "learning_rate": 7.938494440123468e-05,
      "loss": 0.031,
      "step": 1210
    },
    {
      "epoch": 1.8640183346065697,
      "grad_norm": 0.16669751703739166,
      "learning_rate": 7.895144031045918e-05,
      "loss": 0.0349,
      "step": 1220
    },
    {
      "epoch": 1.8792971734148205,
      "grad_norm": 0.2631896138191223,
      "learning_rate": 7.851463719722913e-05,
      "loss": 0.0234,
      "step": 1230
    },
    {
      "epoch": 1.894576012223071,
      "grad_norm": 0.33011606335639954,
      "learning_rate": 7.80745848353473e-05,
      "loss": 0.0334,
      "step": 1240
    },
    {
      "epoch": 1.9098548510313216,
      "grad_norm": 0.27665475010871887,
      "learning_rate": 7.763133336886892e-05,
      "loss": 0.0352,
      "step": 1250
    },
    {
      "epoch": 1.9098548510313216,
      "eval_loss": 0.03766217082738876,
      "eval_runtime": 188.3048,
      "eval_samples_per_second": 6.181,
      "eval_steps_per_second": 6.181,
      "step": 1250
    },
    {
      "epoch": 1.9251336898395723,
      "grad_norm": 0.15627293288707733,
      "learning_rate": 7.718493330638789e-05,
      "loss": 0.0245,
      "step": 1260
    },
    {
      "epoch": 1.9404125286478227,
      "grad_norm": 0.11793619394302368,
      "learning_rate": 7.673543551528122e-05,
      "loss": 0.0391,
      "step": 1270
    },
    {
      "epoch": 1.9556913674560734,
      "grad_norm": 0.46485260128974915,
      "learning_rate": 7.628289121591277e-05,
      "loss": 0.0365,
      "step": 1280
    },
    {
      "epoch": 1.970970206264324,
      "grad_norm": 0.28952929377555847,
      "learning_rate": 7.582735197579656e-05,
      "loss": 0.0318,
      "step": 1290
    },
    {
      "epoch": 1.9862490450725745,
      "grad_norm": 0.24003995954990387,
      "learning_rate": 7.536886970372078e-05,
      "loss": 0.0289,
      "step": 1300
    },
    {
      "epoch": 1.9862490450725745,
      "eval_loss": 0.036530643701553345,
      "eval_runtime": 188.1847,
      "eval_samples_per_second": 6.185,
      "eval_steps_per_second": 6.185,
      "step": 1300
    },
    {
      "epoch": 2.0015278838808253,
      "grad_norm": 0.23869691789150238,
      "learning_rate": 7.490749664383271e-05,
      "loss": 0.0428,
      "step": 1310
    },
    {
      "epoch": 2.0168067226890756,
      "grad_norm": 0.18940101563930511,
      "learning_rate": 7.444328536968538e-05,
      "loss": 0.0265,
      "step": 1320
    },
    {
      "epoch": 2.0320855614973263,
      "grad_norm": 0.4082794487476349,
      "learning_rate": 7.397628877824701e-05,
      "loss": 0.0227,
      "step": 1330
    },
    {
      "epoch": 2.0473644003055766,
      "grad_norm": 0.13684681057929993,
      "learning_rate": 7.350656008387327e-05,
      "loss": 0.0201,
      "step": 1340
    },
    {
      "epoch": 2.0626432391138274,
      "grad_norm": 0.1932537704706192,
      "learning_rate": 7.303415281224346e-05,
      "loss": 0.0202,
      "step": 1350
    },
    {
      "epoch": 2.0626432391138274,
      "eval_loss": 0.03559866175055504,
      "eval_runtime": 188.1857,
      "eval_samples_per_second": 6.185,
      "eval_steps_per_second": 6.185,
      "step": 1350
    },
    {
      "epoch": 2.0779220779220777,
      "grad_norm": 0.1311611384153366,
      "learning_rate": 7.255912079426136e-05,
      "loss": 0.0164,
      "step": 1360
    },
    {
      "epoch": 2.0932009167303285,
      "grad_norm": 0.40545451641082764,
      "learning_rate": 7.208151815992107e-05,
      "loss": 0.0198,
      "step": 1370
    },
    {
      "epoch": 2.1084797555385792,
      "grad_norm": 0.45390045642852783,
      "learning_rate": 7.160139933213898e-05,
      "loss": 0.0272,
      "step": 1380
    },
    {
      "epoch": 2.1237585943468296,
      "grad_norm": 0.31190603971481323,
      "learning_rate": 7.111881902055223e-05,
      "loss": 0.0196,
      "step": 1390
    },
    {
      "epoch": 2.1390374331550803,
      "grad_norm": 0.49418479204177856,
      "learning_rate": 7.06338322152845e-05,
      "loss": 0.0174,
      "step": 1400
    },
    {
      "epoch": 2.1390374331550803,
      "eval_loss": 0.035685863345861435,
      "eval_runtime": 187.4939,
      "eval_samples_per_second": 6.208,
      "eval_steps_per_second": 6.208,
      "step": 1400
    },
    {
      "epoch": 2.1543162719633306,
      "grad_norm": 0.2847372889518738,
      "learning_rate": 7.014649418067994e-05,
      "loss": 0.0185,
      "step": 1410
    },
    {
      "epoch": 2.1695951107715814,
      "grad_norm": 0.16619278490543365,
      "learning_rate": 6.965686044900577e-05,
      "loss": 0.0149,
      "step": 1420
    },
    {
      "epoch": 2.184873949579832,
      "grad_norm": 0.44972145557403564,
      "learning_rate": 6.91649868141243e-05,
      "loss": 0.0149,
      "step": 1430
    },
    {
      "epoch": 2.2001527883880825,
      "grad_norm": 0.15861999988555908,
      "learning_rate": 6.86709293251353e-05,
      "loss": 0.0106,
      "step": 1440
    },
    {
      "epoch": 2.2154316271963332,
      "grad_norm": 0.5113412141799927,
      "learning_rate": 6.817474427998916e-05,
      "loss": 0.0134,
      "step": 1450
    },
    {
      "epoch": 2.2154316271963332,
      "eval_loss": 0.03945557773113251,
      "eval_runtime": 187.1894,
      "eval_samples_per_second": 6.218,
      "eval_steps_per_second": 6.218,
      "step": 1450
    },
    {
      "epoch": 2.2307104660045836,
      "grad_norm": 0.7107367515563965,
      "learning_rate": 6.767648821907172e-05,
      "loss": 0.0208,
      "step": 1460
    },
    {
      "epoch": 2.2459893048128343,
      "grad_norm": 0.2532961666584015,
      "learning_rate": 6.717621791876147e-05,
      "loss": 0.0317,
      "step": 1470
    },
    {
      "epoch": 2.2612681436210846,
      "grad_norm": 0.25595831871032715,
      "learning_rate": 6.667399038495986e-05,
      "loss": 0.0228,
      "step": 1480
    },
    {
      "epoch": 2.2765469824293354,
      "grad_norm": 0.2862846851348877,
      "learning_rate": 6.616986284659556e-05,
      "loss": 0.0238,
      "step": 1490
    },
    {
      "epoch": 2.2918258212375857,
      "grad_norm": 0.1290646344423294,
      "learning_rate": 6.566389274910309e-05,
      "loss": 0.02,
      "step": 1500
    },
    {
      "epoch": 2.2918258212375857,
      "eval_loss": 0.03613383322954178,
      "eval_runtime": 187.5145,
      "eval_samples_per_second": 6.208,
      "eval_steps_per_second": 6.208,
      "step": 1500
    },
    {
      "epoch": 2.3071046600458365,
      "grad_norm": 0.1208987608551979,
      "learning_rate": 6.515613774787697e-05,
      "loss": 0.0161,
      "step": 1510
    },
    {
      "epoch": 2.3223834988540872,
      "grad_norm": 0.14849679172039032,
      "learning_rate": 6.464665570170186e-05,
      "loss": 0.0226,
      "step": 1520
    },
    {
      "epoch": 2.3376623376623376,
      "grad_norm": 0.1842430830001831,
      "learning_rate": 6.413550466615952e-05,
      "loss": 0.0188,
      "step": 1530
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 0.22144414484500885,
      "learning_rate": 6.362274288701342e-05,
      "loss": 0.016,
      "step": 1540
    },
    {
      "epoch": 2.3682200152788386,
      "grad_norm": 0.2850957214832306,
      "learning_rate": 6.310842879357158e-05,
      "loss": 0.0189,
      "step": 1550
    },
    {
      "epoch": 2.3682200152788386,
      "eval_loss": 0.03735971450805664,
      "eval_runtime": 187.5259,
      "eval_samples_per_second": 6.207,
      "eval_steps_per_second": 6.207,
      "step": 1550
    },
    {
      "epoch": 2.3834988540870894,
      "grad_norm": 0.28386402130126953,
      "learning_rate": 6.25926209920285e-05,
      "loss": 0.0129,
      "step": 1560
    },
    {
      "epoch": 2.39877769289534,
      "grad_norm": 0.3930361866950989,
      "learning_rate": 6.207537825878708e-05,
      "loss": 0.0177,
      "step": 1570
    },
    {
      "epoch": 2.4140565317035905,
      "grad_norm": 0.1254359781742096,
      "learning_rate": 6.155675953376095e-05,
      "loss": 0.0278,
      "step": 1580
    },
    {
      "epoch": 2.4293353705118412,
      "grad_norm": 0.2683659493923187,
      "learning_rate": 6.103682391365828e-05,
      "loss": 0.0247,
      "step": 1590
    },
    {
      "epoch": 2.4446142093200915,
      "grad_norm": 0.20878136157989502,
      "learning_rate": 6.05156306452477e-05,
      "loss": 0.0162,
      "step": 1600
    },
    {
      "epoch": 2.4446142093200915,
      "eval_loss": 0.03479843959212303,
      "eval_runtime": 187.7453,
      "eval_samples_per_second": 6.2,
      "eval_steps_per_second": 6.2,
      "step": 1600
    },
    {
      "epoch": 2.4598930481283423,
      "grad_norm": 0.2738895118236542,
      "learning_rate": 5.9993239118607124e-05,
      "loss": 0.0255,
      "step": 1610
    },
    {
      "epoch": 2.475171886936593,
      "grad_norm": 0.23021462559700012,
      "learning_rate": 5.9469708860356246e-05,
      "loss": 0.0173,
      "step": 1620
    },
    {
      "epoch": 2.4904507257448434,
      "grad_norm": 0.23016996681690216,
      "learning_rate": 5.89450995268734e-05,
      "loss": 0.0198,
      "step": 1630
    },
    {
      "epoch": 2.5057295645530937,
      "grad_norm": 0.37153613567352295,
      "learning_rate": 5.841947089749783e-05,
      "loss": 0.0226,
      "step": 1640
    },
    {
      "epoch": 2.5210084033613445,
      "grad_norm": 0.3245033919811249,
      "learning_rate": 5.78928828677177e-05,
      "loss": 0.0252,
      "step": 1650
    },
    {
      "epoch": 2.5210084033613445,
      "eval_loss": 0.037063825875520706,
      "eval_runtime": 187.1008,
      "eval_samples_per_second": 6.221,
      "eval_steps_per_second": 6.221,
      "step": 1650
    },
    {
      "epoch": 2.5362872421695952,
      "grad_norm": 0.7465736865997314,
      "learning_rate": 5.7365395442345085e-05,
      "loss": 0.0186,
      "step": 1660
    },
    {
      "epoch": 2.5515660809778455,
      "grad_norm": 0.25831788778305054,
      "learning_rate": 5.683706872867833e-05,
      "loss": 0.0179,
      "step": 1670
    },
    {
      "epoch": 2.5668449197860963,
      "grad_norm": 0.32766786217689514,
      "learning_rate": 5.630796292965288e-05,
      "loss": 0.0271,
      "step": 1680
    },
    {
      "epoch": 2.5821237585943466,
      "grad_norm": 0.38363370299339294,
      "learning_rate": 5.57781383369811e-05,
      "loss": 0.0204,
      "step": 1690
    },
    {
      "epoch": 2.5974025974025974,
      "grad_norm": 0.14547067880630493,
      "learning_rate": 5.524765532428203e-05,
      "loss": 0.0175,
      "step": 1700
    },
    {
      "epoch": 2.5974025974025974,
      "eval_loss": 0.03661783039569855,
      "eval_runtime": 186.6526,
      "eval_samples_per_second": 6.236,
      "eval_steps_per_second": 6.236,
      "step": 1700
    },
    {
      "epoch": 2.612681436210848,
      "grad_norm": 0.29803889989852905,
      "learning_rate": 5.471657434020182e-05,
      "loss": 0.0212,
      "step": 1710
    },
    {
      "epoch": 2.6279602750190985,
      "grad_norm": 0.31547999382019043,
      "learning_rate": 5.418495590152557e-05,
      "loss": 0.0233,
      "step": 1720
    },
    {
      "epoch": 2.643239113827349,
      "grad_norm": 0.19628596305847168,
      "learning_rate": 5.365286058628145e-05,
      "loss": 0.0236,
      "step": 1730
    },
    {
      "epoch": 2.6585179526355995,
      "grad_norm": 0.3903220593929291,
      "learning_rate": 5.312034902683779e-05,
      "loss": 0.0264,
      "step": 1740
    },
    {
      "epoch": 2.6737967914438503,
      "grad_norm": 0.42966052889823914,
      "learning_rate": 5.258748190299404e-05,
      "loss": 0.0222,
      "step": 1750
    },
    {
      "epoch": 2.6737967914438503,
      "eval_loss": 0.03461781516671181,
      "eval_runtime": 184.1896,
      "eval_samples_per_second": 6.32,
      "eval_steps_per_second": 6.32,
      "step": 1750
    },
    {
      "epoch": 2.689075630252101,
      "grad_norm": 0.2390514761209488,
      "learning_rate": 5.20543199350663e-05,
      "loss": 0.0203,
      "step": 1760
    },
    {
      "epoch": 2.7043544690603514,
      "grad_norm": 0.17239676415920258,
      "learning_rate": 5.152092387696821e-05,
      "loss": 0.0235,
      "step": 1770
    },
    {
      "epoch": 2.719633307868602,
      "grad_norm": 0.29174742102622986,
      "learning_rate": 5.0987354509287985e-05,
      "loss": 0.0189,
      "step": 1780
    },
    {
      "epoch": 2.7349121466768525,
      "grad_norm": 0.35961881279945374,
      "learning_rate": 5.045367263236257e-05,
      "loss": 0.016,
      "step": 1790
    },
    {
      "epoch": 2.750190985485103,
      "grad_norm": 0.15872785449028015,
      "learning_rate": 4.991993905934931e-05,
      "loss": 0.0274,
      "step": 1800
    },
    {
      "epoch": 2.750190985485103,
      "eval_loss": 0.03465921804308891,
      "eval_runtime": 185.6419,
      "eval_samples_per_second": 6.27,
      "eval_steps_per_second": 6.27,
      "step": 1800
    },
    {
      "epoch": 2.765469824293354,
      "grad_norm": 0.28768208622932434,
      "learning_rate": 4.938621460929639e-05,
      "loss": 0.0187,
      "step": 1810
    },
    {
      "epoch": 2.7807486631016043,
      "grad_norm": 0.24558648467063904,
      "learning_rate": 4.885256010021233e-05,
      "loss": 0.0185,
      "step": 1820
    },
    {
      "epoch": 2.7960275019098546,
      "grad_norm": 0.27898794412612915,
      "learning_rate": 4.831903634213599e-05,
      "loss": 0.0193,
      "step": 1830
    },
    {
      "epoch": 2.8113063407181054,
      "grad_norm": 0.06646784394979477,
      "learning_rate": 4.778570413020702e-05,
      "loss": 0.021,
      "step": 1840
    },
    {
      "epoch": 2.826585179526356,
      "grad_norm": 0.43815556168556213,
      "learning_rate": 4.725262423773838e-05,
      "loss": 0.0215,
      "step": 1850
    },
    {
      "epoch": 2.826585179526356,
      "eval_loss": 0.03624486178159714,
      "eval_runtime": 185.8089,
      "eval_samples_per_second": 6.265,
      "eval_steps_per_second": 6.265,
      "step": 1850
    },
    {
      "epoch": 2.8418640183346064,
      "grad_norm": 0.22074271738529205,
      "learning_rate": 4.671985740929123e-05,
      "loss": 0.0138,
      "step": 1860
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.05057336017489433,
      "learning_rate": 4.618746435375295e-05,
      "loss": 0.0212,
      "step": 1870
    },
    {
      "epoch": 2.8724216959511075,
      "grad_norm": 0.21193920075893402,
      "learning_rate": 4.565550573741942e-05,
      "loss": 0.0175,
      "step": 1880
    },
    {
      "epoch": 2.8877005347593583,
      "grad_norm": 0.5380321741104126,
      "learning_rate": 4.512404217708217e-05,
      "loss": 0.0242,
      "step": 1890
    },
    {
      "epoch": 2.902979373567609,
      "grad_norm": 0.33445772528648376,
      "learning_rate": 4.45931342331209e-05,
      "loss": 0.0201,
      "step": 1900
    },
    {
      "epoch": 2.902979373567609,
      "eval_loss": 0.03776334598660469,
      "eval_runtime": 185.4273,
      "eval_samples_per_second": 6.277,
      "eval_steps_per_second": 6.277,
      "step": 1900
    },
    {
      "epoch": 2.9182582123758594,
      "grad_norm": 0.5133573412895203,
      "learning_rate": 4.406284240260278e-05,
      "loss": 0.0282,
      "step": 1910
    },
    {
      "epoch": 2.93353705118411,
      "grad_norm": 0.1490231305360794,
      "learning_rate": 4.3533227112388694e-05,
      "loss": 0.0184,
      "step": 1920
    },
    {
      "epoch": 2.9488158899923604,
      "grad_norm": 0.25799980759620667,
      "learning_rate": 4.300434871224763e-05,
      "loss": 0.0256,
      "step": 1930
    },
    {
      "epoch": 2.964094728800611,
      "grad_norm": 0.3950966000556946,
      "learning_rate": 4.247626746797983e-05,
      "loss": 0.0231,
      "step": 1940
    },
    {
      "epoch": 2.979373567608862,
      "grad_norm": 0.24436689913272858,
      "learning_rate": 4.1949043554549406e-05,
      "loss": 0.016,
      "step": 1950
    },
    {
      "epoch": 2.979373567608862,
      "eval_loss": 0.034271180629730225,
      "eval_runtime": 185.359,
      "eval_samples_per_second": 6.28,
      "eval_steps_per_second": 6.28,
      "step": 1950
    },
    {
      "epoch": 2.9946524064171123,
      "grad_norm": 0.43147724866867065,
      "learning_rate": 4.14227370492275e-05,
      "loss": 0.0254,
      "step": 1960
    },
    {
      "epoch": 3.009931245225363,
      "grad_norm": 0.0808895006775856,
      "learning_rate": 4.08974079247464e-05,
      "loss": 0.0153,
      "step": 1970
    },
    {
      "epoch": 3.0252100840336134,
      "grad_norm": 0.024843959137797356,
      "learning_rate": 4.037311604246565e-05,
      "loss": 0.0091,
      "step": 1980
    },
    {
      "epoch": 3.040488922841864,
      "grad_norm": 0.2611240744590759,
      "learning_rate": 3.9849921145550805e-05,
      "loss": 0.01,
      "step": 1990
    },
    {
      "epoch": 3.0557677616501144,
      "grad_norm": 0.09150966256856918,
      "learning_rate": 3.9327882852165795e-05,
      "loss": 0.009,
      "step": 2000
    },
    {
      "epoch": 3.0557677616501144,
      "eval_loss": 0.03721309453248978,
      "eval_runtime": 185.6193,
      "eval_samples_per_second": 6.271,
      "eval_steps_per_second": 6.271,
      "step": 2000
    },
    {
      "epoch": 3.071046600458365,
      "grad_norm": 0.10902375727891922,
      "learning_rate": 3.880706064867926e-05,
      "loss": 0.0094,
      "step": 2010
    },
    {
      "epoch": 3.0863254392666155,
      "grad_norm": 0.3028370440006256,
      "learning_rate": 3.8287513882886196e-05,
      "loss": 0.0062,
      "step": 2020
    },
    {
      "epoch": 3.1016042780748663,
      "grad_norm": 0.11812803894281387,
      "learning_rate": 3.776930175724521e-05,
      "loss": 0.0102,
      "step": 2030
    },
    {
      "epoch": 3.116883116883117,
      "grad_norm": 0.21782122552394867,
      "learning_rate": 3.7252483322132386e-05,
      "loss": 0.0129,
      "step": 2040
    },
    {
      "epoch": 3.1321619556913673,
      "grad_norm": 0.09384758770465851,
      "learning_rate": 3.673711746911252e-05,
      "loss": 0.0106,
      "step": 2050
    },
    {
      "epoch": 3.1321619556913673,
      "eval_loss": 0.03892069309949875,
      "eval_runtime": 185.889,
      "eval_samples_per_second": 6.262,
      "eval_steps_per_second": 6.262,
      "step": 2050
    },
    {
      "epoch": 3.147440794499618,
      "grad_norm": 0.16040179133415222,
      "learning_rate": 3.6223262924228344e-05,
      "loss": 0.0053,
      "step": 2060
    },
    {
      "epoch": 3.1627196333078684,
      "grad_norm": 0.04083335027098656,
      "learning_rate": 3.5710978241308733e-05,
      "loss": 0.0097,
      "step": 2070
    },
    {
      "epoch": 3.177998472116119,
      "grad_norm": 0.311753511428833,
      "learning_rate": 3.520032179529652e-05,
      "loss": 0.0101,
      "step": 2080
    },
    {
      "epoch": 3.19327731092437,
      "grad_norm": 0.36532482504844666,
      "learning_rate": 3.4691351775596564e-05,
      "loss": 0.0086,
      "step": 2090
    },
    {
      "epoch": 3.2085561497326203,
      "grad_norm": 0.25262030959129333,
      "learning_rate": 3.41841261794451e-05,
      "loss": 0.0061,
      "step": 2100
    },
    {
      "epoch": 3.2085561497326203,
      "eval_loss": 0.0431986041367054,
      "eval_runtime": 186.9913,
      "eval_samples_per_second": 6.225,
      "eval_steps_per_second": 6.225,
      "step": 2100
    },
    {
      "epoch": 3.223834988540871,
      "grad_norm": 0.12558381259441376,
      "learning_rate": 3.367870280530101e-05,
      "loss": 0.0124,
      "step": 2110
    },
    {
      "epoch": 3.2391138273491213,
      "grad_norm": 0.3111749589443207,
      "learning_rate": 3.3175139246259536e-05,
      "loss": 0.0084,
      "step": 2120
    },
    {
      "epoch": 3.254392666157372,
      "grad_norm": 0.04653264582157135,
      "learning_rate": 3.2673492883489696e-05,
      "loss": 0.0061,
      "step": 2130
    },
    {
      "epoch": 3.2696715049656224,
      "grad_norm": 0.6107545495033264,
      "learning_rate": 3.2173820879695535e-05,
      "loss": 0.0102,
      "step": 2140
    },
    {
      "epoch": 3.284950343773873,
      "grad_norm": 0.4089570641517639,
      "learning_rate": 3.1676180172602525e-05,
      "loss": 0.0075,
      "step": 2150
    },
    {
      "epoch": 3.284950343773873,
      "eval_loss": 0.04343220591545105,
      "eval_runtime": 188.264,
      "eval_samples_per_second": 6.183,
      "eval_steps_per_second": 6.183,
      "step": 2150
    },
    {
      "epoch": 3.300229182582124,
      "grad_norm": 0.12825438380241394,
      "learning_rate": 3.11806274684695e-05,
      "loss": 0.0081,
      "step": 2160
    },
    {
      "epoch": 3.3155080213903743,
      "grad_norm": 0.13207189738750458,
      "learning_rate": 3.068721923562688e-05,
      "loss": 0.0044,
      "step": 2170
    },
    {
      "epoch": 3.330786860198625,
      "grad_norm": 0.1463756561279297,
      "learning_rate": 3.019601169804216e-05,
      "loss": 0.0133,
      "step": 2180
    },
    {
      "epoch": 3.3460656990068753,
      "grad_norm": 0.39609789848327637,
      "learning_rate": 2.9707060828913225e-05,
      "loss": 0.0067,
      "step": 2190
    },
    {
      "epoch": 3.361344537815126,
      "grad_norm": 0.35121870040893555,
      "learning_rate": 2.9220422344290056e-05,
      "loss": 0.0089,
      "step": 2200
    },
    {
      "epoch": 3.361344537815126,
      "eval_loss": 0.04335245117545128,
      "eval_runtime": 189.2873,
      "eval_samples_per_second": 6.149,
      "eval_steps_per_second": 6.149,
      "step": 2200
    },
    {
      "epoch": 3.3766233766233764,
      "grad_norm": 0.3331177532672882,
      "learning_rate": 2.873615169672601e-05,
      "loss": 0.0056,
      "step": 2210
    },
    {
      "epoch": 3.391902215431627,
      "grad_norm": 0.21099725365638733,
      "learning_rate": 2.8254304068958927e-05,
      "loss": 0.0081,
      "step": 2220
    },
    {
      "epoch": 3.407181054239878,
      "grad_norm": 0.0633828192949295,
      "learning_rate": 2.7774934367622996e-05,
      "loss": 0.0106,
      "step": 2230
    },
    {
      "epoch": 3.4224598930481283,
      "grad_norm": 0.3705494999885559,
      "learning_rate": 2.7298097216992284e-05,
      "loss": 0.0072,
      "step": 2240
    },
    {
      "epoch": 3.437738731856379,
      "grad_norm": 0.2332771122455597,
      "learning_rate": 2.6823846952756125e-05,
      "loss": 0.0102,
      "step": 2250
    },
    {
      "epoch": 3.437738731856379,
      "eval_loss": 0.04621849209070206,
      "eval_runtime": 190.621,
      "eval_samples_per_second": 6.106,
      "eval_steps_per_second": 6.106,
      "step": 2250
    },
    {
      "epoch": 3.4530175706646293,
      "grad_norm": 0.26837480068206787,
      "learning_rate": 2.6352237615827636e-05,
      "loss": 0.005,
      "step": 2260
    },
    {
      "epoch": 3.46829640947288,
      "grad_norm": 0.20386861264705658,
      "learning_rate": 2.5883322946185777e-05,
      "loss": 0.0079,
      "step": 2270
    },
    {
      "epoch": 3.483575248281131,
      "grad_norm": 0.6309589743614197,
      "learning_rate": 2.5417156376751562e-05,
      "loss": 0.0146,
      "step": 2280
    },
    {
      "epoch": 3.498854087089381,
      "grad_norm": 0.10873563587665558,
      "learning_rate": 2.4953791027299506e-05,
      "loss": 0.0042,
      "step": 2290
    },
    {
      "epoch": 3.514132925897632,
      "grad_norm": 0.11322731524705887,
      "learning_rate": 2.4493279698404493e-05,
      "loss": 0.0083,
      "step": 2300
    },
    {
      "epoch": 3.514132925897632,
      "eval_loss": 0.04650338739156723,
      "eval_runtime": 192.2547,
      "eval_samples_per_second": 6.054,
      "eval_steps_per_second": 6.054,
      "step": 2300
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 0.12000257521867752,
      "learning_rate": 2.403567486542518e-05,
      "loss": 0.0083,
      "step": 2310
    },
    {
      "epoch": 3.544690603514133,
      "grad_norm": 0.27765700221061707,
      "learning_rate": 2.3581028672524485e-05,
      "loss": 0.0096,
      "step": 2320
    },
    {
      "epoch": 3.5599694423223838,
      "grad_norm": 0.1320173442363739,
      "learning_rate": 2.312939292672765e-05,
      "loss": 0.0069,
      "step": 2330
    },
    {
      "epoch": 3.575248281130634,
      "grad_norm": 0.26741477847099304,
      "learning_rate": 2.268081909201885e-05,
      "loss": 0.0039,
      "step": 2340
    },
    {
      "epoch": 3.5905271199388844,
      "grad_norm": 0.016012145206332207,
      "learning_rate": 2.2235358283476936e-05,
      "loss": 0.0131,
      "step": 2350
    },
    {
      "epoch": 3.5905271199388844,
      "eval_loss": 0.0442974716424942,
      "eval_runtime": 193.688,
      "eval_samples_per_second": 6.01,
      "eval_steps_per_second": 6.01,
      "step": 2350
    },
    {
      "epoch": 3.605805958747135,
      "grad_norm": 0.04935047775506973,
      "learning_rate": 2.179306126145075e-05,
      "loss": 0.0117,
      "step": 2360
    },
    {
      "epoch": 3.621084797555386,
      "grad_norm": 0.3039514124393463,
      "learning_rate": 2.1353978425775008e-05,
      "loss": 0.0081,
      "step": 2370
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.23148193955421448,
      "learning_rate": 2.091815981002731e-05,
      "loss": 0.0081,
      "step": 2380
    },
    {
      "epoch": 3.651642475171887,
      "grad_norm": 0.3215958774089813,
      "learning_rate": 2.0485655075826667e-05,
      "loss": 0.0116,
      "step": 2390
    },
    {
      "epoch": 3.6669213139801373,
      "grad_norm": 0.5242221355438232,
      "learning_rate": 2.0056513507174685e-05,
      "loss": 0.0054,
      "step": 2400
    },
    {
      "epoch": 3.6669213139801373,
      "eval_loss": 0.04235563799738884,
      "eval_runtime": 194.4097,
      "eval_samples_per_second": 5.987,
      "eval_steps_per_second": 5.987,
      "step": 2400
    },
    {
      "epoch": 3.682200152788388,
      "grad_norm": 0.20493106544017792,
      "learning_rate": 1.963078400483953e-05,
      "loss": 0.0115,
      "step": 2410
    },
    {
      "epoch": 3.697478991596639,
      "grad_norm": 0.5069310069084167,
      "learning_rate": 1.9208515080783723e-05,
      "loss": 0.0084,
      "step": 2420
    },
    {
      "epoch": 3.712757830404889,
      "grad_norm": 0.2113790065050125,
      "learning_rate": 1.8789754852636245e-05,
      "loss": 0.0043,
      "step": 2430
    },
    {
      "epoch": 3.72803666921314,
      "grad_norm": 0.04292771592736244,
      "learning_rate": 1.837455103820942e-05,
      "loss": 0.014,
      "step": 2440
    },
    {
      "epoch": 3.7433155080213902,
      "grad_norm": 0.11845701187849045,
      "learning_rate": 1.7962950950061502e-05,
      "loss": 0.0038,
      "step": 2450
    },
    {
      "epoch": 3.7433155080213902,
      "eval_loss": 0.04276303946971893,
      "eval_runtime": 194.915,
      "eval_samples_per_second": 5.972,
      "eval_steps_per_second": 5.972,
      "step": 2450
    },
    {
      "epoch": 3.758594346829641,
      "grad_norm": 0.08765993267297745,
      "learning_rate": 1.7555001490105488e-05,
      "loss": 0.0072,
      "step": 2460
    },
    {
      "epoch": 3.7738731856378918,
      "grad_norm": 0.17220765352249146,
      "learning_rate": 1.7150749144264462e-05,
      "loss": 0.0088,
      "step": 2470
    },
    {
      "epoch": 3.789152024446142,
      "grad_norm": 0.1879175752401352,
      "learning_rate": 1.6750239977174682e-05,
      "loss": 0.0067,
      "step": 2480
    },
    {
      "epoch": 3.8044308632543924,
      "grad_norm": 0.38570868968963623,
      "learning_rate": 1.6353519626936397e-05,
      "loss": 0.012,
      "step": 2490
    },
    {
      "epoch": 3.819709702062643,
      "grad_norm": 0.08858808875083923,
      "learning_rate": 1.596063329991341e-05,
      "loss": 0.0074,
      "step": 2500
    },
    {
      "epoch": 3.819709702062643,
      "eval_loss": 0.04288734495639801,
      "eval_runtime": 196.135,
      "eval_samples_per_second": 5.935,
      "eval_steps_per_second": 5.935,
      "step": 2500
    },
    {
      "epoch": 3.834988540870894,
      "grad_norm": 0.09536246210336685,
      "learning_rate": 1.5571625765581832e-05,
      "loss": 0.0071,
      "step": 2510
    },
    {
      "epoch": 3.8502673796791442,
      "grad_norm": 0.1967010349035263,
      "learning_rate": 1.5186541351428545e-05,
      "loss": 0.0085,
      "step": 2520
    },
    {
      "epoch": 3.865546218487395,
      "grad_norm": 0.7389079332351685,
      "learning_rate": 1.4805423937900087e-05,
      "loss": 0.0111,
      "step": 2530
    },
    {
      "epoch": 3.8808250572956453,
      "grad_norm": 0.21783044934272766,
      "learning_rate": 1.4428316953402526e-05,
      "loss": 0.0074,
      "step": 2540
    },
    {
      "epoch": 3.896103896103896,
      "grad_norm": 0.18299157917499542,
      "learning_rate": 1.4055263369352672e-05,
      "loss": 0.0056,
      "step": 2550
    },
    {
      "epoch": 3.896103896103896,
      "eval_loss": 0.04263545572757721,
      "eval_runtime": 197.0054,
      "eval_samples_per_second": 5.908,
      "eval_steps_per_second": 5.908,
      "step": 2550
    },
    {
      "epoch": 3.911382734912147,
      "grad_norm": 0.2797110378742218,
      "learning_rate": 1.3686305695281559e-05,
      "loss": 0.0119,
      "step": 2560
    },
    {
      "epoch": 3.926661573720397,
      "grad_norm": 0.4075024724006653,
      "learning_rate": 1.3321485973990494e-05,
      "loss": 0.0072,
      "step": 2570
    },
    {
      "epoch": 3.941940412528648,
      "grad_norm": 0.21931466460227966,
      "learning_rate": 1.2960845776760156e-05,
      "loss": 0.0137,
      "step": 2580
    },
    {
      "epoch": 3.9572192513368982,
      "grad_norm": 0.13403423130512238,
      "learning_rate": 1.2604426198613688e-05,
      "loss": 0.0057,
      "step": 2590
    },
    {
      "epoch": 3.972498090145149,
      "grad_norm": 0.42918461561203003,
      "learning_rate": 1.2252267853633798e-05,
      "loss": 0.007,
      "step": 2600
    },
    {
      "epoch": 3.972498090145149,
      "eval_loss": 0.042820464819669724,
      "eval_runtime": 197.5342,
      "eval_samples_per_second": 5.893,
      "eval_steps_per_second": 5.893,
      "step": 2600
    },
    {
      "epoch": 3.9877769289533997,
      "grad_norm": 0.05887259915471077,
      "learning_rate": 1.1904410870334803e-05,
      "loss": 0.0122,
      "step": 2610
    },
    {
      "epoch": 4.0030557677616505,
      "grad_norm": 0.09298639744520187,
      "learning_rate": 1.1560894887090052e-05,
      "loss": 0.01,
      "step": 2620
    },
    {
      "epoch": 4.0183346065699,
      "grad_norm": 0.38059186935424805,
      "learning_rate": 1.1221759047615004e-05,
      "loss": 0.0032,
      "step": 2630
    },
    {
      "epoch": 4.033613445378151,
      "grad_norm": 0.04407782480120659,
      "learning_rate": 1.0887041996506859e-05,
      "loss": 0.0018,
      "step": 2640
    },
    {
      "epoch": 4.048892284186402,
      "grad_norm": 0.03980774059891701,
      "learning_rate": 1.0556781874841027e-05,
      "loss": 0.0034,
      "step": 2650
    },
    {
      "epoch": 4.048892284186402,
      "eval_loss": 0.043419938534498215,
      "eval_runtime": 197.6882,
      "eval_samples_per_second": 5.888,
      "eval_steps_per_second": 5.888,
      "step": 2650
    },
    {
      "epoch": 4.064171122994653,
      "grad_norm": 0.21608670055866241,
      "learning_rate": 1.0231016315824875e-05,
      "loss": 0.0042,
      "step": 2660
    },
    {
      "epoch": 4.0794499618029025,
      "grad_norm": 0.03518800064921379,
      "learning_rate": 9.909782440509491e-06,
      "loss": 0.004,
      "step": 2670
    },
    {
      "epoch": 4.094728800611153,
      "grad_norm": 0.46719732880592346,
      "learning_rate": 9.593116853559648e-06,
      "loss": 0.0048,
      "step": 2680
    },
    {
      "epoch": 4.110007639419404,
      "grad_norm": 0.05882187932729721,
      "learning_rate": 9.281055639082747e-06,
      "loss": 0.0048,
      "step": 2690
    },
    {
      "epoch": 4.125286478227655,
      "grad_norm": 0.0705062597990036,
      "learning_rate": 8.973634356517063e-06,
      "loss": 0.0051,
      "step": 2700
    },
    {
      "epoch": 4.125286478227655,
      "eval_loss": 0.04598911106586456,
      "eval_runtime": 198.719,
      "eval_samples_per_second": 5.858,
      "eval_steps_per_second": 5.858,
      "step": 2700
    },
    {
      "epoch": 4.140565317035906,
      "grad_norm": 0.21090030670166016,
      "learning_rate": 8.670888036579639e-06,
      "loss": 0.0043,
      "step": 2710
    },
    {
      "epoch": 4.1558441558441555,
| "grad_norm": 0.11752012372016907, | |
| "learning_rate": 8.372851177274604e-06, | |
| "loss": 0.0045, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.171122994652406, | |
| "grad_norm": 0.18993452191352844, | |
| "learning_rate": 8.079557739962128e-06, | |
| "loss": 0.0065, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.186401833460657, | |
| "grad_norm": 0.07488386332988739, | |
| "learning_rate": 7.791041145488454e-06, | |
| "loss": 0.0033, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.201680672268908, | |
| "grad_norm": 0.06048087030649185, | |
| "learning_rate": 7.507334270377619e-06, | |
| "loss": 0.0043, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.201680672268908, | |
| "eval_loss": 0.046420708298683167, | |
| "eval_runtime": 199.5972, | |
| "eval_samples_per_second": 5.832, | |
| "eval_steps_per_second": 5.832, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.2169595110771585, | |
| "grad_norm": 0.08631011098623276, | |
| "learning_rate": 7.228469443085206e-06, | |
| "loss": 0.0029, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.232238349885408, | |
| "grad_norm": 0.43059056997299194, | |
| "learning_rate": 6.954478440314427e-06, | |
| "loss": 0.0059, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.247517188693659, | |
| "grad_norm": 0.026996580883860588, | |
| "learning_rate": 6.685392483395259e-06, | |
| "loss": 0.0018, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.26279602750191, | |
| "grad_norm": 0.3803030252456665, | |
| "learning_rate": 6.421242234726682e-06, | |
| "loss": 0.0036, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.278074866310161, | |
| "grad_norm": 0.32429173588752747, | |
| "learning_rate": 6.1620577942827166e-06, | |
| "loss": 0.0023, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.278074866310161, | |
| "eval_loss": 0.04719507321715355, | |
| "eval_runtime": 199.3439, | |
| "eval_samples_per_second": 5.839, | |
| "eval_steps_per_second": 5.839, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.293353705118411, | |
| "grad_norm": 0.23215115070343018, | |
| "learning_rate": 5.907868696182584e-06, | |
| "loss": 0.0042, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.308632543926661, | |
| "grad_norm": 0.020175212994217873, | |
| "learning_rate": 5.658703905325186e-06, | |
| "loss": 0.0038, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.323911382734912, | |
| "grad_norm": 0.16418121755123138, | |
| "learning_rate": 5.414591814088627e-06, | |
| "loss": 0.0047, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.339190221543163, | |
| "grad_norm": 0.04667774215340614, | |
| "learning_rate": 5.17556023909489e-06, | |
| "loss": 0.0035, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.354469060351414, | |
| "grad_norm": 0.21239246428012848, | |
| "learning_rate": 4.941636418040058e-06, | |
| "loss": 0.0035, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.354469060351414, | |
| "eval_loss": 0.047712646424770355, | |
| "eval_runtime": 199.277, | |
| "eval_samples_per_second": 5.841, | |
| "eval_steps_per_second": 5.841, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.369747899159664, | |
| "grad_norm": 0.2187272608280182, | |
| "learning_rate": 4.7128470065906925e-06, | |
| "loss": 0.0033, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.385026737967914, | |
| "grad_norm": 0.1950971633195877, | |
| "learning_rate": 4.4892180753462744e-06, | |
| "loss": 0.0028, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.400305576776165, | |
| "grad_norm": 0.13193565607070923, | |
| "learning_rate": 4.270775106868586e-06, | |
| "loss": 0.0028, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.415584415584416, | |
| "grad_norm": 0.05055893212556839, | |
| "learning_rate": 4.057542992777868e-06, | |
| "loss": 0.0019, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.4308632543926665, | |
| "grad_norm": 0.12795984745025635, | |
| "learning_rate": 3.849546030916473e-06, | |
| "loss": 0.0021, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.4308632543926665, | |
| "eval_loss": 0.04882192611694336, | |
| "eval_runtime": 200.4256, | |
| "eval_samples_per_second": 5.808, | |
| "eval_steps_per_second": 5.808, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.446142093200916, | |
| "grad_norm": 0.13322241604328156, | |
| "learning_rate": 3.646807922580098e-06, | |
| "loss": 0.0017, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.461420932009167, | |
| "grad_norm": 0.1583353877067566, | |
| "learning_rate": 3.4493517698170164e-06, | |
| "loss": 0.0051, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.476699770817418, | |
| "grad_norm": 0.0266915000975132, | |
| "learning_rate": 3.2572000727956186e-06, | |
| "loss": 0.004, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.491978609625669, | |
| "grad_norm": 0.41155147552490234, | |
| "learning_rate": 3.070374727240466e-06, | |
| "loss": 0.0019, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.507257448433919, | |
| "grad_norm": 0.09082096815109253, | |
| "learning_rate": 2.8888970219373314e-06, | |
| "loss": 0.0021, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.507257448433919, | |
| "eval_loss": 0.0496671088039875, | |
| "eval_runtime": 200.9403, | |
| "eval_samples_per_second": 5.793, | |
| "eval_steps_per_second": 5.793, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.522536287242169, | |
| "grad_norm": 0.03273924067616463, | |
| "learning_rate": 2.7127876363072736e-06, | |
| "loss": 0.0018, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.53781512605042, | |
| "grad_norm": 0.3910587430000305, | |
| "learning_rate": 2.54206663805025e-06, | |
| "loss": 0.0026, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.553093964858671, | |
| "grad_norm": 0.07621679455041885, | |
| "learning_rate": 2.3767534808584125e-06, | |
| "loss": 0.001, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.5683728036669216, | |
| "grad_norm": 0.0307341106235981, | |
| "learning_rate": 2.2168670021993075e-06, | |
| "loss": 0.0023, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.583651642475171, | |
| "grad_norm": 0.25612154603004456, | |
| "learning_rate": 2.0624254211693894e-06, | |
| "loss": 0.0024, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.583651642475171, | |
| "eval_loss": 0.05014451965689659, | |
| "eval_runtime": 200.6569, | |
| "eval_samples_per_second": 5.801, | |
| "eval_steps_per_second": 5.801, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.598930481283422, | |
| "grad_norm": 0.05506656691431999, | |
| "learning_rate": 1.9134463364179177e-06, | |
| "loss": 0.0031, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.614209320091673, | |
| "grad_norm": 0.2952725887298584, | |
| "learning_rate": 1.7699467241416024e-06, | |
| "loss": 0.0038, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.629488158899924, | |
| "grad_norm": 0.23653660714626312, | |
| "learning_rate": 1.6319429361501714e-06, | |
| "loss": 0.0023, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.6447669977081745, | |
| "grad_norm": 0.014740560203790665, | |
| "learning_rate": 1.4994506980030577e-06, | |
| "loss": 0.0012, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.660045836516424, | |
| "grad_norm": 0.38012057542800903, | |
| "learning_rate": 1.3724851072174917e-06, | |
| "loss": 0.0013, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.660045836516424, | |
| "eval_loss": 0.050505463033914566, | |
| "eval_runtime": 200.7331, | |
| "eval_samples_per_second": 5.799, | |
| "eval_steps_per_second": 5.799, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.675324675324675, | |
| "grad_norm": 0.1817777156829834, | |
| "learning_rate": 1.251060631548112e-06, | |
| "loss": 0.0057, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.690603514132926, | |
| "grad_norm": 0.08571702241897583, | |
| "learning_rate": 1.135191107338368e-06, | |
| "loss": 0.0013, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.705882352941177, | |
| "grad_norm": 0.49542587995529175, | |
| "learning_rate": 1.0248897379438904e-06, | |
| "loss": 0.0028, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.721161191749427, | |
| "grad_norm": 0.037737760692834854, | |
| "learning_rate": 9.201690922279405e-07, | |
| "loss": 0.0024, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.736440030557677, | |
| "grad_norm": 0.04411200433969498, | |
| "learning_rate": 8.210411031291776e-07, | |
| "loss": 0.0031, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.736440030557677, | |
| "eval_loss": 0.051058944314718246, | |
| "eval_runtime": 201.9356, | |
| "eval_samples_per_second": 5.764, | |
| "eval_steps_per_second": 5.764, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.751718869365928, | |
| "grad_norm": 0.032969068735837936, | |
| "learning_rate": 7.275170663019415e-07, | |
| "loss": 0.002, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.766997708174179, | |
| "grad_norm": 0.40801408886909485, | |
| "learning_rate": 6.396076388290484e-07, | |
| "loss": 0.0036, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.7822765469824295, | |
| "grad_norm": 0.027664145454764366, | |
| "learning_rate": 5.573228380074736e-07, | |
| "loss": 0.0012, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.79755538579068, | |
| "grad_norm": 0.012874705716967583, | |
| "learning_rate": 4.806720402068477e-07, | |
| "loss": 0.0019, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.81283422459893, | |
| "grad_norm": 0.03708149120211601, | |
| "learning_rate": 4.0966397980100604e-07, | |
| "loss": 0.0031, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.81283422459893, | |
| "eval_loss": 0.05105143412947655, | |
| "eval_runtime": 202.4138, | |
| "eval_samples_per_second": 5.751, | |
| "eval_steps_per_second": 5.751, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.828113063407181, | |
| "grad_norm": 0.02528611198067665, | |
| "learning_rate": 3.4430674817274575e-07, | |
| "loss": 0.0018, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.843391902215432, | |
| "grad_norm": 0.05252419784665108, | |
| "learning_rate": 2.8460779279176896e-07, | |
| "loss": 0.002, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.8586707410236825, | |
| "grad_norm": 0.026198415085673332, | |
| "learning_rate": 2.3057391636606696e-07, | |
| "loss": 0.0012, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.873949579831933, | |
| "grad_norm": 0.46497654914855957, | |
| "learning_rate": 1.8221127606674605e-07, | |
| "loss": 0.0044, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.889228418640183, | |
| "grad_norm": 0.11807846277952194, | |
| "learning_rate": 1.3952538282639982e-07, | |
| "loss": 0.0023, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.889228418640183, | |
| "eval_loss": 0.051153309643268585, | |
| "eval_runtime": 201.9276, | |
| "eval_samples_per_second": 5.764, | |
| "eval_steps_per_second": 5.764, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.904507257448434, | |
| "grad_norm": 0.03819113224744797, | |
| "learning_rate": 1.025211007111615e-07, | |
| "loss": 0.0009, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.919786096256685, | |
| "grad_norm": 0.014340424910187721, | |
| "learning_rate": 7.120264636643615e-08, | |
| "loss": 0.0008, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.935064935064935, | |
| "grad_norm": 0.02623898908495903, | |
| "learning_rate": 4.5573588536407254e-08, | |
| "loss": 0.0034, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.950343773873186, | |
| "grad_norm": 0.42800572514533997, | |
| "learning_rate": 2.5636847657367623e-08, | |
| "loss": 0.0039, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.965622612681436, | |
| "grad_norm": 0.08229000121355057, | |
| "learning_rate": 1.1394695524963306e-08, | |
| "loss": 0.0023, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.965622612681436, | |
| "eval_loss": 0.051108263432979584, | |
| "eval_runtime": 202.0569, | |
| "eval_samples_per_second": 5.761, | |
| "eval_steps_per_second": 5.761, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.980901451489687, | |
| "grad_norm": 0.08490585535764694, | |
| "learning_rate": 2.8487550352951363e-09, | |
| "loss": 0.004, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 4.9961802902979375, | |
| "grad_norm": 0.07834521681070328, | |
| "learning_rate": 0.0, | |
| "loss": 0.0033, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 4.9961802902979375, | |
| "step": 3270, | |
| "total_flos": 8.213792003390177e+17, | |
| "train_loss": 0.04069158185845873, | |
| "train_runtime": 42808.6048, | |
| "train_samples_per_second": 1.223, | |
| "train_steps_per_second": 0.076 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3270, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.213792003390177e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
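
A note on reading this log: the `log_history` array above interleaves training entries (which carry `"loss"`, `"grad_norm"`, and `"learning_rate"`) with evaluation entries (which carry `"eval_loss"` and runtime stats). In the tail shown here, `eval_loss` drifts upward from about 0.0424 at step 2400 to about 0.0511 at step 3250 while the training loss sits in the low thousandths, so the final checkpoint at step 3270 is not the best one by validation loss. Below is a minimal sketch for extracting that eval curve from a saved state file; the filename `trainer_state.json` is an assumption about where this JSON lives, not something stated in the log itself.

```python
import json

# Minimal sketch: summarize the eval curve from a saved trainer_state.json.
# The path is an assumption; point it at wherever this file was written.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes train logs (keyed by "loss") and eval logs
# (keyed by "eval_loss"); keep only the eval entries.
evals = [(entry["step"], entry["eval_loss"])
         for entry in state["log_history"]
         if "eval_loss" in entry]

for step, loss in evals:
    print(f"step {step:>5}: eval_loss {loss:.4f}")

# Report the step with the lowest recorded eval loss.
best_step, best_loss = min(evals, key=lambda t: t[1])
print(f"best eval_loss {best_loss:.4f} at step {best_step}")
```

Since `eval_steps` is 50 and `save_steps` is 50, every evaluation above coincides with a saved checkpoint, so the best-eval step found this way maps directly to a `checkpoint-<step>` directory that can be reloaded in place of the final one.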