{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 3777,
  "global_step": 113298,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.026478843404120107,
      "grad_norm": 2.1454062461853027,
      "learning_rate": 0.00019929327373275146,
      "loss": 0.8356,
      "step": 500
    },
    {
      "epoch": 0.052957686808240215,
      "grad_norm": 1.2584812641143799,
      "learning_rate": 0.00019841163271435891,
      "loss": 0.5745,
      "step": 1000
    },
    {
      "epoch": 0.07943653021236033,
      "grad_norm": 1.0932960510253906,
      "learning_rate": 0.00019752822488029826,
      "loss": 0.5041,
      "step": 1500
    },
    {
      "epoch": 0.10591537361648043,
      "grad_norm": 2.201864004135132,
      "learning_rate": 0.00019664481704623757,
      "loss": 0.4696,
      "step": 2000
    },
    {
      "epoch": 0.13239421702060053,
      "grad_norm": 1.9619899988174438,
      "learning_rate": 0.0001957614092121769,
      "loss": 0.4524,
      "step": 2500
    },
    {
      "epoch": 0.15887306042472066,
      "grad_norm": 2.994553327560425,
      "learning_rate": 0.00019487800137811624,
      "loss": 0.418,
      "step": 3000
    },
    {
      "epoch": 0.18535190382884076,
      "grad_norm": 2.0418312549591064,
      "learning_rate": 0.00019399459354405555,
      "loss": 0.4174,
      "step": 3500
    },
    {
      "epoch": 0.2000211830747233,
      "eval_loss": 0.40778520703315735,
      "eval_runtime": 421.2687,
      "eval_samples_per_second": 10.936,
      "eval_steps_per_second": 5.469,
      "step": 3777
    },
    {
      "epoch": 0.21183074723296086,
      "grad_norm": 1.8520241975784302,
      "learning_rate": 0.0001931111857099949,
      "loss": 0.4004,
      "step": 4000
    },
    {
      "epoch": 0.23830959063708096,
      "grad_norm": 1.4545440673828125,
      "learning_rate": 0.00019222954469160232,
      "loss": 0.3804,
      "step": 4500
    },
    {
      "epoch": 0.26478843404120106,
      "grad_norm": 2.6964781284332275,
      "learning_rate": 0.00019134613685754166,
      "loss": 0.3777,
      "step": 5000
    },
    {
      "epoch": 0.2912672774453212,
      "grad_norm": 2.227689027786255,
      "learning_rate": 0.00019046272902348098,
      "loss": 0.3642,
      "step": 5500
    },
    {
      "epoch": 0.3177461208494413,
      "grad_norm": 2.4446637630462646,
      "learning_rate": 0.00018957932118942032,
      "loss": 0.359,
      "step": 6000
    },
    {
      "epoch": 0.3442249642535614,
      "grad_norm": 1.461595892906189,
      "learning_rate": 0.00018869591335535964,
      "loss": 0.3509,
      "step": 6500
    },
    {
      "epoch": 0.3707038076576815,
      "grad_norm": 1.95978581905365,
      "learning_rate": 0.00018781250552129896,
      "loss": 0.3561,
      "step": 7000
    },
    {
      "epoch": 0.3971826510618016,
      "grad_norm": 2.1562278270721436,
      "learning_rate": 0.0001869290976872383,
      "loss": 0.34,
      "step": 7500
    },
    {
      "epoch": 0.4000423661494466,
      "eval_loss": 0.35886627435684204,
      "eval_runtime": 421.2564,
      "eval_samples_per_second": 10.936,
      "eval_steps_per_second": 5.469,
      "step": 7554
    },
    {
      "epoch": 0.4236614944659217,
      "grad_norm": 2.0626142024993896,
      "learning_rate": 0.00018604745666884575,
      "loss": 0.3374,
      "step": 8000
    },
    {
      "epoch": 0.4501403378700418,
      "grad_norm": 1.5433763265609741,
      "learning_rate": 0.00018516581565045318,
      "loss": 0.3371,
      "step": 8500
    },
    {
      "epoch": 0.4766191812741619,
      "grad_norm": 2.220177412033081,
      "learning_rate": 0.00018428240781639252,
      "loss": 0.3325,
      "step": 9000
    },
    {
      "epoch": 0.503098024678282,
      "grad_norm": 1.1187978982925415,
      "learning_rate": 0.00018339899998233187,
      "loss": 0.3245,
      "step": 9500
    },
    {
      "epoch": 0.5295768680824021,
      "grad_norm": 2.554443836212158,
      "learning_rate": 0.00018251559214827118,
      "loss": 0.3252,
      "step": 10000
    },
    {
      "epoch": 0.5560557114865222,
      "grad_norm": 1.8116191625595093,
      "learning_rate": 0.0001816321843142105,
      "loss": 0.3153,
      "step": 10500
    },
    {
      "epoch": 0.5825345548906424,
      "grad_norm": 1.6784979104995728,
      "learning_rate": 0.00018074877648014982,
      "loss": 0.3104,
      "step": 11000
    },
    {
      "epoch": 0.6000635492241699,
      "eval_loss": 0.3305271863937378,
      "eval_runtime": 421.3809,
      "eval_samples_per_second": 10.933,
      "eval_steps_per_second": 5.468,
      "step": 11331
    },
    {
      "epoch": 0.6090133982947625,
      "grad_norm": 2.8816235065460205,
      "learning_rate": 0.00017986536864608916,
      "loss": 0.3211,
      "step": 11500
    },
    {
      "epoch": 0.6354922416988826,
      "grad_norm": 0.8169271945953369,
      "learning_rate": 0.0001789819608120285,
      "loss": 0.3172,
      "step": 12000
    },
    {
      "epoch": 0.6619710851030027,
      "grad_norm": 1.5912612676620483,
      "learning_rate": 0.00017809855297796782,
      "loss": 0.3092,
      "step": 12500
    },
    {
      "epoch": 0.6884499285071228,
      "grad_norm": 1.9082287549972534,
      "learning_rate": 0.00017721691195957527,
      "loss": 0.302,
      "step": 13000
    },
    {
      "epoch": 0.7149287719112429,
      "grad_norm": 1.7852967977523804,
      "learning_rate": 0.0001763335041255146,
      "loss": 0.3026,
      "step": 13500
    },
    {
      "epoch": 0.741407615315363,
      "grad_norm": 1.7745851278305054,
      "learning_rate": 0.00017545009629145393,
      "loss": 0.2926,
      "step": 14000
    },
    {
      "epoch": 0.7678864587194831,
      "grad_norm": 1.4642239809036255,
      "learning_rate": 0.00017456668845739325,
      "loss": 0.2987,
      "step": 14500
    },
    {
      "epoch": 0.7943653021236032,
      "grad_norm": 1.8523545265197754,
      "learning_rate": 0.00017368328062333257,
      "loss": 0.2901,
      "step": 15000
    },
    {
      "epoch": 0.8000847322988932,
      "eval_loss": 0.3112636208534241,
      "eval_runtime": 420.9037,
      "eval_samples_per_second": 10.945,
      "eval_steps_per_second": 5.474,
      "step": 15108
    },
    {
      "epoch": 0.8208441455277233,
      "grad_norm": 3.936985492706299,
      "learning_rate": 0.00017280163960494002,
      "loss": 0.289,
      "step": 15500
    },
    {
      "epoch": 0.8473229889318434,
      "grad_norm": 2.120314598083496,
      "learning_rate": 0.00017191823177087936,
      "loss": 0.2947,
      "step": 16000
    },
    {
      "epoch": 0.8738018323359635,
      "grad_norm": 2.1625630855560303,
      "learning_rate": 0.00017103659075248678,
      "loss": 0.2897,
      "step": 16500
    },
    {
      "epoch": 0.9002806757400836,
      "grad_norm": 1.2618048191070557,
      "learning_rate": 0.00017015318291842613,
      "loss": 0.2919,
      "step": 17000
    },
    {
      "epoch": 0.9267595191442037,
      "grad_norm": 1.7789500951766968,
      "learning_rate": 0.00016926977508436545,
      "loss": 0.2841,
      "step": 17500
    },
    {
      "epoch": 0.9532383625483238,
      "grad_norm": 0.8625043034553528,
      "learning_rate": 0.0001683863672503048,
      "loss": 0.2813,
      "step": 18000
    },
    {
      "epoch": 0.979717205952444,
      "grad_norm": 2.622668981552124,
      "learning_rate": 0.00016750295941624413,
      "loss": 0.2823,
      "step": 18500
    },
    {
      "epoch": 1.0001059153736165,
      "eval_loss": 0.2998111844062805,
      "eval_runtime": 420.9652,
      "eval_samples_per_second": 10.944,
      "eval_steps_per_second": 5.473,
      "step": 18885
    },
    {
      "epoch": 1.006196049356564,
      "grad_norm": 1.9936221837997437,
      "learning_rate": 0.00016661955158218342,
      "loss": 0.2725,
      "step": 19000
    },
    {
      "epoch": 1.0326748927606841,
      "grad_norm": 1.7693616151809692,
      "learning_rate": 0.00016573614374812277,
      "loss": 0.2572,
      "step": 19500
    },
    {
      "epoch": 1.0591537361648042,
      "grad_norm": 1.0694758892059326,
      "learning_rate": 0.00016485273591406208,
      "loss": 0.2526,
      "step": 20000
    },
    {
      "epoch": 1.0856325795689243,
      "grad_norm": 2.6468887329101562,
      "learning_rate": 0.00016396932808000143,
      "loss": 0.257,
      "step": 20500
    },
    {
      "epoch": 1.1121114229730444,
      "grad_norm": 2.7952466011047363,
      "learning_rate": 0.00016308768706160885,
      "loss": 0.2583,
      "step": 21000
    },
    {
      "epoch": 1.1385902663771645,
      "grad_norm": 4.5573625564575195,
      "learning_rate": 0.0001622042792275482,
      "loss": 0.2601,
      "step": 21500
    },
    {
      "epoch": 1.1650691097812849,
      "grad_norm": 1.8404908180236816,
      "learning_rate": 0.00016132087139348754,
      "loss": 0.2475,
      "step": 22000
    },
    {
      "epoch": 1.1915479531854047,
      "grad_norm": 1.9419597387313843,
      "learning_rate": 0.00016043746355942686,
      "loss": 0.2591,
      "step": 22500
    },
    {
      "epoch": 1.2001270984483399,
      "eval_loss": 0.29582297801971436,
      "eval_runtime": 421.9827,
      "eval_samples_per_second": 10.918,
      "eval_steps_per_second": 5.46,
      "step": 22662
    },
    {
      "epoch": 1.218026796589525,
      "grad_norm": 3.7483320236206055,
      "learning_rate": 0.00015955405572536617,
      "loss": 0.2438,
      "step": 23000
    },
    {
      "epoch": 1.2445056399936452,
      "grad_norm": 1.0397343635559082,
      "learning_rate": 0.00015867418152264176,
      "loss": 0.2546,
      "step": 23500
    },
    {
      "epoch": 1.2709844833977653,
      "grad_norm": 1.655588150024414,
      "learning_rate": 0.00015779077368858108,
      "loss": 0.2517,
      "step": 24000
    },
    {
      "epoch": 1.2974633268018854,
      "grad_norm": 3.5589778423309326,
      "learning_rate": 0.00015690736585452042,
      "loss": 0.2503,
      "step": 24500
    },
    {
      "epoch": 1.3239421702060055,
      "grad_norm": 1.4428561925888062,
      "learning_rate": 0.00015602395802045974,
      "loss": 0.2521,
      "step": 25000
    },
    {
      "epoch": 1.3504210136101256,
      "grad_norm": 1.6518936157226562,
      "learning_rate": 0.00015514055018639905,
      "loss": 0.2513,
      "step": 25500
    },
    {
      "epoch": 1.3768998570142457,
      "grad_norm": 1.4650779962539673,
      "learning_rate": 0.0001542571423523384,
      "loss": 0.2597,
      "step": 26000
    },
    {
      "epoch": 1.400148281523063,
      "eval_loss": 0.2870725393295288,
      "eval_runtime": 421.6939,
      "eval_samples_per_second": 10.925,
      "eval_steps_per_second": 5.464,
      "step": 26439
    },
    {
      "epoch": 1.4033787004183658,
      "grad_norm": 2.4973056316375732,
      "learning_rate": 0.0001533737345182777,
      "loss": 0.2438,
      "step": 26500
    },
    {
      "epoch": 1.4298575438224859,
      "grad_norm": 1.4028382301330566,
      "learning_rate": 0.00015249032668421703,
      "loss": 0.2414,
      "step": 27000
    },
    {
      "epoch": 1.456336387226606,
      "grad_norm": 1.586595058441162,
      "learning_rate": 0.00015160691885015637,
      "loss": 0.245,
      "step": 27500
    },
    {
      "epoch": 1.482815230630726,
      "grad_norm": 2.191901922225952,
      "learning_rate": 0.00015072527783176383,
      "loss": 0.246,
      "step": 28000
    },
    {
      "epoch": 1.5092940740348462,
      "grad_norm": 1.2381248474121094,
      "learning_rate": 0.00014984186999770314,
      "loss": 0.2449,
      "step": 28500
    },
    {
      "epoch": 1.5357729174389663,
      "grad_norm": 1.502170443534851,
      "learning_rate": 0.00014895846216364246,
      "loss": 0.2448,
      "step": 29000
    },
    {
      "epoch": 1.5622517608430864,
      "grad_norm": 2.206437349319458,
      "learning_rate": 0.0001480768211452499,
      "loss": 0.2488,
      "step": 29500
    },
    {
      "epoch": 1.5887306042472065,
      "grad_norm": 2.766979694366455,
      "learning_rate": 0.00014719341331118925,
      "loss": 0.2536,
      "step": 30000
    },
    {
      "epoch": 1.6001694645977864,
      "eval_loss": 0.2894779145717621,
      "eval_runtime": 421.1711,
      "eval_samples_per_second": 10.939,
      "eval_steps_per_second": 5.47,
      "step": 30216
    },
    {
      "epoch": 1.6152094476513266,
      "grad_norm": 1.9961110353469849,
      "learning_rate": 0.0001463100054771286,
      "loss": 0.2312,
      "step": 30500
    },
    {
      "epoch": 1.6416882910554467,
      "grad_norm": 3.271880626678467,
      "learning_rate": 0.0001454265976430679,
      "loss": 0.2387,
      "step": 31000
    },
    {
      "epoch": 1.6681671344595668,
      "grad_norm": 0.7093228697776794,
      "learning_rate": 0.00014454318980900723,
      "loss": 0.239,
      "step": 31500
    },
    {
      "epoch": 1.6946459778636869,
      "grad_norm": 1.9597697257995605,
      "learning_rate": 0.00014366154879061468,
      "loss": 0.2335,
      "step": 32000
    },
    {
      "epoch": 1.721124821267807,
      "grad_norm": 2.2032573223114014,
      "learning_rate": 0.00014277814095655403,
      "loss": 0.2399,
      "step": 32500
    },
    {
      "epoch": 1.747603664671927,
      "grad_norm": 1.6669458150863647,
      "learning_rate": 0.00014189473312249332,
      "loss": 0.2472,
      "step": 33000
    },
    {
      "epoch": 1.7740825080760474,
      "grad_norm": 2.5589282512664795,
      "learning_rate": 0.00014101132528843266,
      "loss": 0.2353,
      "step": 33500
    },
    {
      "epoch": 1.8001906476725096,
      "eval_loss": 0.2819114029407501,
      "eval_runtime": 420.9242,
      "eval_samples_per_second": 10.945,
      "eval_steps_per_second": 5.474,
      "step": 33993
    },
    {
      "epoch": 1.8005613514801673,
      "grad_norm": 2.796818971633911,
      "learning_rate": 0.000140127917454372,
      "loss": 0.2399,
      "step": 34000
    },
    {
      "epoch": 1.8270401948842876,
      "grad_norm": 1.920599341392517,
      "learning_rate": 0.00013924450962031132,
      "loss": 0.2363,
      "step": 34500
    },
    {
      "epoch": 1.8535190382884075,
      "grad_norm": 1.8848789930343628,
      "learning_rate": 0.00013836110178625066,
      "loss": 0.2357,
      "step": 35000
    },
    {
      "epoch": 1.8799978816925278,
      "grad_norm": 2.0130763053894043,
      "learning_rate": 0.00013747769395218998,
      "loss": 0.2393,
      "step": 35500
    },
    {
      "epoch": 1.9064767250966477,
      "grad_norm": 3.166252613067627,
      "learning_rate": 0.00013659605293379743,
      "loss": 0.2369,
      "step": 36000
    },
    {
      "epoch": 1.932955568500768,
      "grad_norm": 2.393334150314331,
      "learning_rate": 0.00013571264509973675,
      "loss": 0.2383,
      "step": 36500
    },
    {
      "epoch": 1.9594344119048879,
      "grad_norm": 1.5478434562683105,
      "learning_rate": 0.0001348292372656761,
      "loss": 0.2385,
      "step": 37000
    },
    {
      "epoch": 1.9859132553090082,
      "grad_norm": 1.8537359237670898,
      "learning_rate": 0.0001339458294316154,
      "loss": 0.2236,
      "step": 37500
    },
    {
      "epoch": 2.000211830747233,
      "eval_loss": 0.278350293636322,
      "eval_runtime": 420.507,
      "eval_samples_per_second": 10.956,
      "eval_steps_per_second": 5.479,
      "step": 37770
    },
    {
      "epoch": 2.012392098713128,
      "grad_norm": 1.4909721612930298,
      "learning_rate": 0.00013306242159755473,
      "loss": 0.2094,
      "step": 38000
    },
    {
      "epoch": 2.0388709421172484,
      "grad_norm": 4.634109020233154,
      "learning_rate": 0.00013217901376349407,
      "loss": 0.2012,
      "step": 38500
    },
    {
      "epoch": 2.0653497855213683,
      "grad_norm": 2.142557382583618,
      "learning_rate": 0.0001312973727451015,
      "loss": 0.2084,
      "step": 39000
    },
    {
      "epoch": 2.0918286289254886,
      "grad_norm": 4.682730674743652,
      "learning_rate": 0.00013041396491104084,
      "loss": 0.2089,
      "step": 39500
    },
    {
      "epoch": 2.1183074723296085,
      "grad_norm": 2.030521869659424,
      "learning_rate": 0.0001295323238926483,
      "loss": 0.2109,
      "step": 40000
    },
    {
      "epoch": 2.144786315733729,
      "grad_norm": 2.6542880535125732,
      "learning_rate": 0.00012864891605858763,
      "loss": 0.2,
      "step": 40500
    },
    {
      "epoch": 2.1712651591378487,
      "grad_norm": 1.762758493423462,
      "learning_rate": 0.00012776550822452692,
      "loss": 0.2139,
      "step": 41000
    },
    {
      "epoch": 2.197744002541969,
      "grad_norm": 0.5812512636184692,
      "learning_rate": 0.00012688210039046627,
      "loss": 0.2055,
      "step": 41500
    },
    {
      "epoch": 2.2002330138219564,
      "eval_loss": 0.2898082733154297,
      "eval_runtime": 421.4286,
      "eval_samples_per_second": 10.932,
      "eval_steps_per_second": 5.467,
      "step": 41547
    },
    {
      "epoch": 2.224222845946089,
      "grad_norm": 1.5051369667053223,
      "learning_rate": 0.00012599869255640558,
      "loss": 0.2099,
      "step": 42000
    },
    {
      "epoch": 2.250701689350209,
      "grad_norm": 2.4985218048095703,
      "learning_rate": 0.00012511528472234493,
      "loss": 0.2038,
      "step": 42500
    },
    {
      "epoch": 2.277180532754329,
      "grad_norm": 3.212822914123535,
      "learning_rate": 0.00012423187688828427,
      "loss": 0.2008,
      "step": 43000
    },
    {
      "epoch": 2.3036593761584494,
      "grad_norm": 1.2164196968078613,
      "learning_rate": 0.00012334846905422356,
      "loss": 0.2078,
      "step": 43500
    },
    {
      "epoch": 2.3301382195625697,
      "grad_norm": 0.8281005024909973,
      "learning_rate": 0.00012246682803583104,
      "loss": 0.208,
      "step": 44000
    },
    {
      "epoch": 2.3566170629666896,
      "grad_norm": 2.5285778045654297,
      "learning_rate": 0.00012158342020177036,
      "loss": 0.2105,
      "step": 44500
    },
    {
      "epoch": 2.3830959063708095,
      "grad_norm": 0.9707619547843933,
      "learning_rate": 0.00012070001236770969,
      "loss": 0.2129,
      "step": 45000
    },
    {
      "epoch": 2.4002541968966797,
      "eval_loss": 0.26870912313461304,
      "eval_runtime": 421.9405,
      "eval_samples_per_second": 10.919,
      "eval_steps_per_second": 5.46,
      "step": 45324
    },
    {
      "epoch": 2.40957474977493,
      "grad_norm": 1.2378835678100586,
      "learning_rate": 0.000119816604533649,
      "loss": 0.2072,
      "step": 45500
    },
    {
      "epoch": 2.43605359317905,
      "grad_norm": 1.5940284729003906,
      "learning_rate": 0.00011893319669958833,
      "loss": 0.2031,
      "step": 46000
    },
    {
      "epoch": 2.46253243658317,
      "grad_norm": 2.2567298412323,
      "learning_rate": 0.00011804978886552766,
      "loss": 0.205,
      "step": 46500
    },
    {
      "epoch": 2.4890112799872903,
      "grad_norm": 1.5430265665054321,
      "learning_rate": 0.000117166381031467,
      "loss": 0.2041,
      "step": 47000
    },
    {
      "epoch": 2.51549012339141,
      "grad_norm": 2.6054415702819824,
      "learning_rate": 0.00011628297319740632,
      "loss": 0.2055,
      "step": 47500
    },
    {
      "epoch": 2.5419689667955305,
      "grad_norm": 1.0687452554702759,
      "learning_rate": 0.00011540133217901376,
      "loss": 0.2032,
      "step": 48000
    },
    {
      "epoch": 2.5684478101996504,
      "grad_norm": 1.7363426685333252,
      "learning_rate": 0.00011451792434495309,
      "loss": 0.1997,
      "step": 48500
    },
    {
      "epoch": 2.5949266536037707,
      "grad_norm": 1.5278480052947998,
      "learning_rate": 0.00011363451651089244,
      "loss": 0.2001,
      "step": 49000
    },
    {
      "epoch": 2.6002753799714027,
      "eval_loss": 0.2719252407550812,
      "eval_runtime": 420.6405,
      "eval_samples_per_second": 10.952,
      "eval_steps_per_second": 5.477,
      "step": 49101
    },
    {
      "epoch": 2.6214054970078906,
      "grad_norm": 5.551360130310059,
      "learning_rate": 0.00011275110867683177,
      "loss": 0.2067,
      "step": 49500
    },
    {
      "epoch": 2.647884340412011,
      "grad_norm": 0.9148823618888855,
      "learning_rate": 0.00011186946765843919,
      "loss": 0.202,
      "step": 50000
    },
    {
      "epoch": 2.674363183816131,
      "grad_norm": 2.075822591781616,
      "learning_rate": 0.00011098605982437854,
      "loss": 0.1996,
      "step": 50500
    },
    {
      "epoch": 2.700842027220251,
      "grad_norm": 4.498257160186768,
      "learning_rate": 0.00011010441880598599,
      "loss": 0.1908,
      "step": 51000
    },
    {
      "epoch": 2.727320870624371,
      "grad_norm": 3.7217941284179688,
      "learning_rate": 0.0001092210109719253,
      "loss": 0.2013,
      "step": 51500
    },
    {
      "epoch": 2.7537997140284913,
      "grad_norm": 4.8926496505737305,
      "learning_rate": 0.00010833760313786463,
      "loss": 0.2096,
      "step": 52000
    },
    {
      "epoch": 2.780278557432611,
      "grad_norm": 1.4642279148101807,
      "learning_rate": 0.00010745419530380396,
      "loss": 0.2108,
      "step": 52500
    },
    {
      "epoch": 2.800296563046126,
      "eval_loss": 0.2713735103607178,
      "eval_runtime": 420.8891,
      "eval_samples_per_second": 10.946,
      "eval_steps_per_second": 5.474,
      "step": 52878
    },
    {
      "epoch": 2.8067574008367315,
      "grad_norm": 2.818092107772827,
      "learning_rate": 0.0001065707874697433,
      "loss": 0.2011,
      "step": 53000
    },
    {
      "epoch": 2.8332362442408514,
      "grad_norm": 2.3732500076293945,
      "learning_rate": 0.00010568737963568261,
      "loss": 0.2017,
      "step": 53500
    },
    {
      "epoch": 2.8597150876449717,
      "grad_norm": 5.441200256347656,
      "learning_rate": 0.00010480397180162194,
      "loss": 0.2024,
      "step": 54000
    },
    {
      "epoch": 2.8861939310490916,
      "grad_norm": 2.660677909851074,
      "learning_rate": 0.00010392056396756127,
      "loss": 0.2062,
      "step": 54500
    },
    {
      "epoch": 2.912672774453212,
      "grad_norm": 3.46864652633667,
      "learning_rate": 0.00010303892294916872,
      "loss": 0.1979,
      "step": 55000
    },
    {
      "epoch": 2.9391516178573323,
      "grad_norm": 1.95028817653656,
      "learning_rate": 0.00010215551511510804,
      "loss": 0.1963,
      "step": 55500
    },
    {
      "epoch": 2.965630461261452,
      "grad_norm": 2.8324038982391357,
      "learning_rate": 0.00010127210728104737,
      "loss": 0.2033,
      "step": 56000
    },
    {
      "epoch": 2.992109304665572,
      "grad_norm": 2.3465640544891357,
      "learning_rate": 0.0001003886994469867,
      "loss": 0.2023,
      "step": 56500
    },
    {
      "epoch": 3.0003177461208494,
      "eval_loss": 0.26497137546539307,
      "eval_runtime": 421.6284,
      "eval_samples_per_second": 10.927,
      "eval_steps_per_second": 5.465,
      "step": 56655
    },
    {
      "epoch": 3.0185881480696923,
      "grad_norm": 2.5295658111572266,
      "learning_rate": 9.950705842859415e-05,
      "loss": 0.1767,
      "step": 57000
    },
    {
      "epoch": 3.045066991473812,
      "grad_norm": 2.448827028274536,
      "learning_rate": 9.862365059453348e-05,
      "loss": 0.1701,
      "step": 57500
    },
    {
      "epoch": 3.0715458348779325,
      "grad_norm": 2.7891571521759033,
      "learning_rate": 9.774200957614092e-05,
      "loss": 0.1793,
      "step": 58000
    },
    {
      "epoch": 3.0980246782820524,
      "grad_norm": 1.5115572214126587,
      "learning_rate": 9.685860174208026e-05,
      "loss": 0.1749,
      "step": 58500
    },
    {
      "epoch": 3.1245035216861727,
      "grad_norm": 4.49403190612793,
      "learning_rate": 9.597519390801958e-05,
      "loss": 0.1666,
      "step": 59000
    },
    {
      "epoch": 3.150982365090293,
      "grad_norm": 2.545595645904541,
      "learning_rate": 9.509178607395891e-05,
      "loss": 0.1755,
      "step": 59500
    },
    {
      "epoch": 3.177461208494413,
      "grad_norm": 1.3802286386489868,
      "learning_rate": 9.420837823989823e-05,
      "loss": 0.1811,
      "step": 60000
    },
    {
      "epoch": 3.200338929195573,
      "eval_loss": 0.2708809971809387,
      "eval_runtime": 421.9193,
      "eval_samples_per_second": 10.919,
      "eval_steps_per_second": 5.461,
      "step": 60432
    },
    {
      "epoch": 3.2039400518985333,
      "grad_norm": 0.7773789167404175,
      "learning_rate": 9.332497040583756e-05,
      "loss": 0.1714,
      "step": 60500
    },
    {
      "epoch": 3.230418895302653,
      "grad_norm": 0.647718071937561,
      "learning_rate": 9.244332938744501e-05,
      "loss": 0.1742,
      "step": 61000
    },
    {
      "epoch": 3.2568977387067735,
      "grad_norm": 2.515371322631836,
      "learning_rate": 9.155992155338434e-05,
      "loss": 0.1725,
      "step": 61500
    },
    {
      "epoch": 3.2833765821108933,
      "grad_norm": 3.261064291000366,
      "learning_rate": 9.067651371932367e-05,
      "loss": 0.1713,
      "step": 62000
    },
    {
      "epoch": 3.3098554255150137,
      "grad_norm": 2.5491161346435547,
      "learning_rate": 8.9793105885263e-05,
      "loss": 0.1702,
      "step": 62500
    },
    {
      "epoch": 3.3363342689191335,
      "grad_norm": 2.7108869552612305,
      "learning_rate": 8.890969805120232e-05,
      "loss": 0.1756,
      "step": 63000
    },
    {
      "epoch": 3.362813112323254,
      "grad_norm": 2.062349319458008,
      "learning_rate": 8.802629021714165e-05,
      "loss": 0.174,
      "step": 63500
    },
    {
      "epoch": 3.3892919557273737,
      "grad_norm": 0.7416903376579285,
      "learning_rate": 8.714288238308098e-05,
      "loss": 0.1702,
      "step": 64000
    },
    {
      "epoch": 3.400360112270296,
      "eval_loss": 0.26548436284065247,
      "eval_runtime": 421.3467,
      "eval_samples_per_second": 10.934,
      "eval_steps_per_second": 5.468,
      "step": 64209
    },
    {
      "epoch": 3.415770799131494,
      "grad_norm": 1.9311342239379883,
      "learning_rate": 8.625947454902031e-05,
      "loss": 0.1744,
      "step": 64500
    },
    {
      "epoch": 3.442249642535614,
      "grad_norm": 2.2730724811553955,
      "learning_rate": 8.537606671495964e-05,
      "loss": 0.172,
      "step": 65000
    },
    {
      "epoch": 3.4687284859397343,
      "grad_norm": 2.294510841369629,
      "learning_rate": 8.449442569656708e-05,
      "loss": 0.1732,
      "step": 65500
    },
    {
      "epoch": 3.495207329343854,
      "grad_norm": 2.0610592365264893,
      "learning_rate": 8.361101786250642e-05,
      "loss": 0.1761,
      "step": 66000
    },
    {
      "epoch": 3.5216861727479745,
      "grad_norm": 5.357800483703613,
      "learning_rate": 8.272761002844574e-05,
      "loss": 0.1773,
      "step": 66500
    },
    {
      "epoch": 3.5481650161520943,
      "grad_norm": 1.0783665180206299,
      "learning_rate": 8.184420219438507e-05,
      "loss": 0.1755,
      "step": 67000
    },
    {
      "epoch": 3.5746438595562147,
      "grad_norm": 2.4767355918884277,
      "learning_rate": 8.096079436032438e-05,
      "loss": 0.176,
      "step": 67500
    },
    {
      "epoch": 3.600381295345019,
      "eval_loss": 0.26650214195251465,
      "eval_runtime": 421.0598,
      "eval_samples_per_second": 10.941,
      "eval_steps_per_second": 5.472,
      "step": 67986
    },
    {
      "epoch": 3.6011227029603345,
      "grad_norm": 3.160248041152954,
      "learning_rate": 8.007915334193185e-05,
      "loss": 0.1671,
      "step": 68000
    },
    {
      "epoch": 3.627601546364455,
      "grad_norm": 2.2208316326141357,
      "learning_rate": 7.919574550787116e-05,
      "loss": 0.1766,
      "step": 68500
    },
    {
      "epoch": 3.6540803897685747,
      "grad_norm": 3.152665615081787,
      "learning_rate": 7.83123376738105e-05,
      "loss": 0.18,
      "step": 69000
    },
    {
      "epoch": 3.680559233172695,
      "grad_norm": 0.9788137078285217,
      "learning_rate": 7.742892983974983e-05,
      "loss": 0.1721,
      "step": 69500
    },
    {
      "epoch": 3.707038076576815,
      "grad_norm": 1.457961082458496,
      "learning_rate": 7.654728882135728e-05,
      "loss": 0.1694,
      "step": 70000
    },
    {
      "epoch": 3.7335169199809353,
      "grad_norm": 1.4032424688339233,
      "learning_rate": 7.56638809872966e-05,
      "loss": 0.1633,
      "step": 70500
    },
    {
      "epoch": 3.7599957633850556,
      "grad_norm": 4.617145538330078,
      "learning_rate": 7.478223996890404e-05,
      "loss": 0.1785,
      "step": 71000
    },
    {
      "epoch": 3.7864746067891755,
      "grad_norm": 1.1956781148910522,
      "learning_rate": 7.389883213484337e-05,
      "loss": 0.1702,
      "step": 71500
    },
    {
      "epoch": 3.8004024784197425,
      "eval_loss": 0.26657551527023315,
      "eval_runtime": 422.2898,
      "eval_samples_per_second": 10.91,
      "eval_steps_per_second": 5.456,
      "step": 71763
    },
    {
      "epoch": 3.8129534501932953,
      "grad_norm": 0.6750999093055725,
      "learning_rate": 7.301542430078269e-05,
      "loss": 0.1775,
      "step": 72000
    },
    {
      "epoch": 3.8394322935974157,
      "grad_norm": 1.8476779460906982,
      "learning_rate": 7.213201646672204e-05,
      "loss": 0.1675,
      "step": 72500
    },
    {
      "epoch": 3.865911137001536,
      "grad_norm": 2.567692756652832,
      "learning_rate": 7.124860863266137e-05,
      "loss": 0.1723,
      "step": 73000
    },
    {
      "epoch": 3.892389980405656,
      "grad_norm": 1.9294815063476562,
      "learning_rate": 7.036520079860068e-05,
      "loss": 0.1703,
      "step": 73500
    },
    {
      "epoch": 3.9188688238097757,
      "grad_norm": 2.111727714538574,
      "learning_rate": 6.948355978020813e-05,
      "loss": 0.1686,
      "step": 74000
    },
    {
      "epoch": 3.945347667213896,
      "grad_norm": 2.216778039932251,
      "learning_rate": 6.860015194614746e-05,
      "loss": 0.1725,
      "step": 74500
    },
    {
      "epoch": 3.9718265106180164,
      "grad_norm": 1.494307041168213,
      "learning_rate": 6.77167441120868e-05,
      "loss": 0.1789,
      "step": 75000
    },
    {
      "epoch": 3.9983053540221363,
      "grad_norm": 1.8370423316955566,
      "learning_rate": 6.683333627802611e-05,
      "loss": 0.1597,
      "step": 75500
    },
    {
      "epoch": 4.000423661494466,
      "eval_loss": 0.26198258996009827,
      "eval_runtime": 422.2011,
      "eval_samples_per_second": 10.912,
      "eval_steps_per_second": 5.457,
      "step": 75540
    },
    {
      "epoch": 4.024784197426256,
      "grad_norm": 2.989509105682373,
      "learning_rate": 6.595169525963356e-05,
      "loss": 0.1527,
      "step": 76000
    },
    {
      "epoch": 4.051263040830377,
      "grad_norm": 2.3569653034210205,
      "learning_rate": 6.506828742557289e-05,
      "loss": 0.1481,
      "step": 76500
    },
    {
      "epoch": 4.077741884234497,
      "grad_norm": 1.7681636810302734,
      "learning_rate": 6.418487959151222e-05,
      "loss": 0.1438,
      "step": 77000
    },
    {
      "epoch": 4.104220727638617,
      "grad_norm": 1.9401808977127075,
      "learning_rate": 6.330147175745155e-05,
      "loss": 0.1453,
      "step": 77500
    },
    {
      "epoch": 4.1306995710427366,
      "grad_norm": 1.9549821615219116,
      "learning_rate": 6.241806392339088e-05,
      "loss": 0.1453,
      "step": 78000
    },
    {
      "epoch": 4.157178414446857,
      "grad_norm": 2.7086589336395264,
      "learning_rate": 6.15346560893302e-05,
      "loss": 0.1424,
      "step": 78500
    },
    {
      "epoch": 4.183657257850977,
      "grad_norm": 2.071589469909668,
      "learning_rate": 6.065124825526953e-05,
      "loss": 0.1388,
      "step": 79000
    },
    {
      "epoch": 4.200444844569189,
      "eval_loss": 0.2703910171985626,
      "eval_runtime": 422.189,
      "eval_samples_per_second": 10.912,
      "eval_steps_per_second": 5.457,
      "step": 79317
    },
    {
      "epoch": 4.210136101255097,
      "grad_norm": 2.643260955810547,
      "learning_rate": 5.9767840421208854e-05,
      "loss": 0.1501,
      "step": 79500
    },
    {
      "epoch": 4.236614944659217,
      "grad_norm": 1.3025317192077637,
      "learning_rate": 5.888796621848443e-05,
      "loss": 0.1531,
      "step": 80000
    },
    {
      "epoch": 4.263093788063338,
      "grad_norm": 0.5772294998168945,
      "learning_rate": 5.800455838442376e-05,
      "loss": 0.1547,
      "step": 80500
    },
    {
      "epoch": 4.289572631467458,
      "grad_norm": 1.2284057140350342,
      "learning_rate": 5.712115055036308e-05,
      "loss": 0.1532,
      "step": 81000
    },
    {
      "epoch": 4.3160514748715775,
      "grad_norm": 1.778266429901123,
      "learning_rate": 5.623774271630241e-05,
      "loss": 0.1444,
      "step": 81500
    },
    {
      "epoch": 4.342530318275697,
      "grad_norm": 4.491095542907715,
      "learning_rate": 5.535433488224174e-05,
      "loss": 0.141,
      "step": 82000
    },
    {
      "epoch": 4.369009161679818,
      "grad_norm": 1.7908458709716797,
      "learning_rate": 5.4470927048181065e-05,
      "loss": 0.1521,
      "step": 82500
    },
    {
      "epoch": 4.395488005083938,
      "grad_norm": 0.9978257417678833,
      "learning_rate": 5.3587519214120395e-05,
      "loss": 0.1452,
      "step": 83000
    },
    {
      "epoch": 4.400466027643913,
      "eval_loss": 0.27073460817337036,
      "eval_runtime": 422.1641,
      "eval_samples_per_second": 10.913,
      "eval_steps_per_second": 5.458,
      "step": 83094
    },
    {
      "epoch": 4.421966848488058,
      "grad_norm": 3.2376346588134766,
      "learning_rate": 5.270411138005972e-05,
      "loss": 0.1487,
      "step": 83500
    },
    {
      "epoch": 4.448445691892178,
      "grad_norm": 1.5663423538208008,
      "learning_rate": 5.182070354599905e-05,
      "loss": 0.1489,
      "step": 84000
    },
    {
      "epoch": 4.4749245352962985,
      "grad_norm": 1.9214493036270142,
      "learning_rate": 5.093729571193837e-05,
      "loss": 0.1431,
      "step": 84500
    },
    {
      "epoch": 4.501403378700418,
      "grad_norm": 0.39390167593955994,
      "learning_rate": 5.005565469354583e-05,
      "loss": 0.1488,
      "step": 85000
    },
    {
      "epoch": 4.527882222104538,
      "grad_norm": 1.417643427848816,
      "learning_rate": 4.9172246859485154e-05,
      "loss": 0.1424,
      "step": 85500
    },
    {
      "epoch": 4.554361065508658,
      "grad_norm": 1.4273784160614014,
      "learning_rate": 4.8288839025424484e-05,
      "loss": 0.1426,
      "step": 86000
    },
    {
      "epoch": 4.580839908912779,
      "grad_norm": 2.3503715991973877,
      "learning_rate": 4.740543119136381e-05,
      "loss": 0.146,
      "step": 86500
    },
    {
      "epoch": 4.600487210718636,
      "eval_loss": 0.2704991102218628,
      "eval_runtime": 422.5207,
      "eval_samples_per_second": 10.904,
      "eval_steps_per_second": 5.453,
      "step": 86871
    },
    {
      "epoch": 4.607318752316899,
      "grad_norm": 2.605318784713745,
      "learning_rate": 4.652202335730314e-05,
      "loss": 0.1496,
      "step": 87000
    },
    {
      "epoch": 4.633797595721019,
      "grad_norm": 2.3824856281280518,
      "learning_rate": 4.563861552324246e-05,
      "loss": 0.1389,
      "step": 87500
    },
    {
      "epoch": 4.6602764391251394,
      "grad_norm": 1.4241347312927246,
      "learning_rate": 4.4755207689181785e-05,
      "loss": 0.1451,
      "step": 88000
    },
    {
      "epoch": 4.686755282529259,
      "grad_norm": 1.1952823400497437,
      "learning_rate": 4.3871799855121115e-05,
      "loss": 0.1441,
      "step": 88500
    },
    {
      "epoch": 4.713234125933379,
      "grad_norm": 1.2604337930679321,
      "learning_rate": 4.299015883672857e-05,
      "loss": 0.144,
      "step": 89000
    },
    {
      "epoch": 4.739712969337499,
      "grad_norm": 2.8621230125427246,
      "learning_rate": 4.210675100266789e-05,
      "loss": 0.1481,
      "step": 89500
    },
    {
      "epoch": 4.766191812741619,
      "grad_norm": 0.4195690453052521,
      "learning_rate": 4.122334316860722e-05,
      "loss": 0.1505,
      "step": 90000
    },
    {
      "epoch": 4.79267065614574,
      "grad_norm": 2.966862916946411,
      "learning_rate": 4.0339935334546544e-05,
      "loss": 0.1388,
      "step": 90500
    },
    {
      "epoch": 4.8005083937933595,
      "eval_loss": 0.26222139596939087,
      "eval_runtime": 421.6806,
      "eval_samples_per_second": 10.925,
      "eval_steps_per_second": 5.464,
      "step": 90648
    },
    {
      "epoch": 4.81914949954986,
      "grad_norm": 1.312051773071289,
      "learning_rate": 3.9458294316153995e-05,
      "loss": 0.1428,
      "step": 91000
    },
    {
      "epoch": 4.8456283429539795,
      "grad_norm": 0.6421635746955872,
      "learning_rate": 3.8574886482093326e-05,
      "loss": 0.1435,
      "step": 91500
    },
    {
      "epoch": 4.8721071863581,
      "grad_norm": 1.8707529306411743,
      "learning_rate": 3.769147864803265e-05,
      "loss": 0.1423,
      "step": 92000
    },
    {
      "epoch": 4.89858602976222,
      "grad_norm": 1.6387176513671875,
      "learning_rate": 3.680807081397198e-05,
      "loss": 0.1378,
      "step": 92500
    },
    {
      "epoch": 4.92506487316634,
      "grad_norm": 2.160985231399536,
      "learning_rate": 3.592642979557943e-05,
      "loss": 0.1439,
      "step": 93000
    },
    {
      "epoch": 4.95154371657046,
      "grad_norm": 1.3969603776931763,
      "learning_rate": 3.5043021961518754e-05,
      "loss": 0.1436,
      "step": 93500
    },
    {
      "epoch": 4.978022559974581,
      "grad_norm": 1.672614574432373,
      "learning_rate": 3.4159614127458085e-05,
      "loss": 0.1435,
      "step": 94000
    },
    {
      "epoch": 5.000529576868082,
      "eval_loss": 0.26488494873046875,
      "eval_runtime": 422.1282,
      "eval_samples_per_second": 10.914,
      "eval_steps_per_second": 5.458,
      "step": 94425
    },
    {
      "epoch": 5.0045014033787005,
      "grad_norm": 3.6483752727508545,
      "learning_rate": 3.327620629339741e-05,
      "loss": 0.1358,
      "step": 94500
    },
    {
      "epoch": 5.03098024678282,
      "grad_norm": 1.201677918434143,
      "learning_rate": 3.239279845933674e-05,
      "loss": 0.1302,
      "step": 95000
    },
    {
      "epoch": 5.05745909018694,
      "grad_norm": 2.516324281692505,
      "learning_rate": 3.150939062527606e-05,
      "loss": 0.1218,
      "step": 95500
    },
    {
      "epoch": 5.083937933591061,
      "grad_norm": 0.14960531890392303,
      "learning_rate": 3.06259827912154e-05,
      "loss": 0.1228,
      "step": 96000
    },
    {
      "epoch": 5.110416776995181,
      "grad_norm": 0.23572592437267303,
      "learning_rate": 2.9742574957154723e-05,
      "loss": 0.1302,
      "step": 96500
    },
    {
      "epoch": 5.136895620399301,
      "grad_norm": 1.1999266147613525,
      "learning_rate": 2.885916712309405e-05,
      "loss": 0.1201,
      "step": 97000
    },
    {
      "epoch": 5.163374463803421,
      "grad_norm": 2.529439687728882,
      "learning_rate": 2.7977526104701497e-05,
      "loss": 0.1217,
      "step": 97500
    },
    {
      "epoch": 5.1898533072075415,
      "grad_norm": 4.092609405517578,
      "learning_rate": 2.7094118270640828e-05,
      "loss": 0.1295,
      "step": 98000
    },
    {
      "epoch": 5.200550759942805,
      "eval_loss": 0.2793600857257843,
      "eval_runtime": 422.1911,
      "eval_samples_per_second": 10.912,
      "eval_steps_per_second": 5.457,
      "step": 98202
    },
    {
      "epoch": 5.216332150611661,
      "grad_norm": 1.677905797958374,
      "learning_rate": 2.6210710436580155e-05,
      "loss": 0.1232,
      "step": 98500
    },
    {
      "epoch": 5.242810994015781,
      "grad_norm": 2.0790467262268066,
      "learning_rate": 2.532730260251948e-05,
      "loss": 0.1145,
      "step": 99000
    },
    {
      "epoch": 5.269289837419901,
      "grad_norm": 0.821835458278656,
      "learning_rate": 2.444389476845881e-05,
      "loss": 0.122,
      "step": 99500
    },
    {
      "epoch": 5.295768680824022,
      "grad_norm": 1.6524577140808105,
      "learning_rate": 2.3560486934398135e-05,
      "loss": 0.1211,
      "step": 100000
    },
    {
      "epoch": 5.322247524228142,
      "grad_norm": 1.0877349376678467,
      "learning_rate": 2.2677079100337462e-05,
      "loss": 0.1213,
      "step": 100500
    },
    {
      "epoch": 5.348726367632262,
      "grad_norm": 1.8999888896942139,
      "learning_rate": 2.1793671266276793e-05,
      "loss": 0.1212,
      "step": 101000
    },
    {
      "epoch": 5.375205211036382,
      "grad_norm": 1.6684705018997192,
      "learning_rate": 2.091203024788424e-05,
      "loss": 0.1195,
      "step": 101500
    },
    {
      "epoch": 5.400571943017529,
      "eval_loss": 0.2780030369758606,
      "eval_runtime": 423.5819,
      "eval_samples_per_second": 10.876,
      "eval_steps_per_second": 5.439,
      "step": 101979
    },
    {
      "epoch": 5.401684054440502,
      "grad_norm": 4.362273216247559,
      "learning_rate": 2.0028622413823567e-05,
      "loss": 0.1269,
      "step": 102000
    },
    {
      "epoch": 5.428162897844622,
      "grad_norm": 1.8318756818771362,
      "learning_rate": 1.9146981395431015e-05,
      "loss": 0.1211,
      "step": 102500
    },
    {
      "epoch": 5.454641741248742,
      "grad_norm": 0.10318777710199356,
      "learning_rate": 1.8263573561370342e-05,
      "loss": 0.122,
      "step": 103000
    },
    {
      "epoch": 5.481120584652862,
      "grad_norm": 3.140679121017456,
      "learning_rate": 1.7380165727309673e-05,
      "loss": 0.1235,
      "step": 103500
    },
    {
      "epoch": 5.507599428056983,
      "grad_norm": 1.0891458988189697,
      "learning_rate": 1.6496757893249e-05,
      "loss": 0.1195,
      "step": 104000
    },
    {
      "epoch": 5.5340782714611025,
      "grad_norm": 1.6873986721038818,
      "learning_rate": 1.5613350059188326e-05,
      "loss": 0.1253,
      "step": 104500
    },
    {
      "epoch": 5.560557114865222,
      "grad_norm": 0.6695570945739746,
      "learning_rate": 1.4729942225127652e-05,
      "loss": 0.1245,
      "step": 105000
    },
    {
      "epoch": 5.587035958269343,
      "grad_norm": 1.3430531024932861,
      "learning_rate": 1.3846534391066982e-05,
      "loss": 0.124,
      "step": 105500
    },
    {
      "epoch": 5.600593126092252,
      "eval_loss": 0.27956077456474304,
      "eval_runtime": 422.629,
      "eval_samples_per_second": 10.901,
      "eval_steps_per_second": 5.452,
      "step": 105756
    },
    {
      "epoch": 5.613514801673463,
      "grad_norm": 3.5196123123168945,
      "learning_rate": 1.2963126557006309e-05,
      "loss": 0.1259,
      "step": 106000
    },
    {
      "epoch": 5.639993645077583,
      "grad_norm": 1.313728928565979,
      "learning_rate": 1.2081485538613757e-05,
      "loss": 0.124,
      "step": 106500
    },
    {
      "epoch": 5.666472488481703,
      "grad_norm": 1.7873483896255493,
      "learning_rate": 1.1198077704553085e-05,
      "loss": 0.1297,
      "step": 107000
    },
    {
      "epoch": 5.692951331885824,
      "grad_norm": 3.823012351989746,
      "learning_rate": 1.0314669870492412e-05,
      "loss": 0.1192,
      "step": 107500
    },
    {
      "epoch": 5.7194301752899435,
      "grad_norm": 0.8463395833969116,
      "learning_rate": 9.43126203643174e-06,
      "loss": 0.1183,
      "step": 108000
    },
    {
      "epoch": 5.745909018694063,
      "grad_norm": 3.8344569206237793,
      "learning_rate": 8.547854202371066e-06,
      "loss": 0.1207,
      "step": 108500
    },
    {
      "epoch": 5.772387862098183,
      "grad_norm": 2.5786736011505127,
      "learning_rate": 7.666213183978516e-06,
      "loss": 0.1148,
      "step": 109000
    },
    {
      "epoch": 5.798866705502304,
      "grad_norm": 2.022777795791626,
      "learning_rate": 6.7828053499178435e-06,
      "loss": 0.1183,
      "step": 109500
    },
    {
      "epoch": 5.800614309166976,
      "eval_loss": 0.283890038728714,
      "eval_runtime": 421.7577,
      "eval_samples_per_second": 10.923,
      "eval_steps_per_second": 5.463,
      "step": 109533
    },
    {
      "epoch": 5.825345548906424,
      "grad_norm": 2.0010786056518555,
      "learning_rate": 5.9011643315252915e-06,
      "loss": 0.1184,
      "step": 110000
    },
    {
      "epoch": 5.851824392310544,
      "grad_norm": 3.0789356231689453,
      "learning_rate": 5.019523313132741e-06,
      "loss": 0.1231,
      "step": 110500
    },
    {
      "epoch": 5.878303235714664,
      "grad_norm": 0.8194056153297424,
      "learning_rate": 4.136115479072069e-06,
      "loss": 0.119,
      "step": 111000
    },
    {
      "epoch": 5.904782079118784,
      "grad_norm": 1.4833017587661743,
      "learning_rate": 3.2527076450113963e-06,
      "loss": 0.119,
      "step": 111500
    },
    {
      "epoch": 5.931260922522904,
      "grad_norm": 3.702115535736084,
      "learning_rate": 2.3692998109507236e-06,
      "loss": 0.1129,
      "step": 112000
    },
    {
      "epoch": 5.957739765927024,
      "grad_norm": 2.612248659133911,
      "learning_rate": 1.4858919768900512e-06,
      "loss": 0.1106,
      "step": 112500
    },
    {
      "epoch": 5.984218609331144,
      "grad_norm": 4.074622631072998,
      "learning_rate": 6.024841428293787e-07,
      "loss": 0.1201,
      "step": 113000
    },
    {
      "epoch": 6.0,
      "step": 113298,
      "total_flos": 1.5167000684003328e+18,
      "train_loss": 0.20791157794172538,
      "train_runtime": 110416.4573,
      "train_samples_per_second": 2.052,
      "train_steps_per_second": 1.026
    }
  ],
  "logging_steps": 500,
  "max_steps": 113298,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5167000684003328e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}