{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 10,
  "global_step": 98,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01020408163265306,
      "grad_norm": 1.5935372114181519,
      "learning_rate": 1e-05,
      "loss": 1.1572,
      "step": 1
    },
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 1.8600682020187378,
      "learning_rate": 9.997377845227577e-06,
      "loss": 1.3198,
      "step": 2
    },
    {
      "epoch": 0.030612244897959183,
      "grad_norm": 1.662075400352478,
      "learning_rate": 9.98951413118856e-06,
      "loss": 1.0636,
      "step": 3
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 1.6947352886199951,
      "learning_rate": 9.97641710583307e-06,
      "loss": 1.2853,
      "step": 4
    },
    {
      "epoch": 0.05102040816326531,
      "grad_norm": 1.231564998626709,
      "learning_rate": 9.958100506132127e-06,
      "loss": 1.0573,
      "step": 5
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": 1.550899863243103,
      "learning_rate": 9.934583543669454e-06,
      "loss": 1.5486,
      "step": 6
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 1.017148733139038,
      "learning_rate": 9.905890884491196e-06,
      "loss": 0.8672,
      "step": 7
    },
    {
      "epoch": 0.08163265306122448,
      "grad_norm": 1.2291380167007446,
      "learning_rate": 9.872052623234632e-06,
      "loss": 1.132,
      "step": 8
    },
    {
      "epoch": 0.09183673469387756,
      "grad_norm": 1.1650055646896362,
      "learning_rate": 9.833104251563058e-06,
      "loss": 1.2419,
      "step": 9
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 0.9753865003585815,
      "learning_rate": 9.789086620939936e-06,
      "loss": 0.9946,
      "step": 10
    },
    {
      "epoch": 0.10204081632653061,
      "eval_loss": 0.9807827472686768,
      "eval_runtime": 0.2604,
      "eval_samples_per_second": 30.717,
      "eval_steps_per_second": 3.84,
      "step": 10
    },
    {
      "epoch": 0.11224489795918367,
      "grad_norm": 1.191615343093872,
      "learning_rate": 9.740045899781353e-06,
      "loss": 1.3435,
      "step": 11
    },
    {
      "epoch": 0.12244897959183673,
      "grad_norm": 0.9799769520759583,
      "learning_rate": 9.68603352503172e-06,
      "loss": 0.7463,
      "step": 12
    },
    {
      "epoch": 0.1326530612244898,
      "grad_norm": 1.0710457563400269,
      "learning_rate": 9.627106148213521e-06,
      "loss": 1.329,
      "step": 13
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 1.071432113647461,
      "learning_rate": 9.563325576007702e-06,
      "loss": 1.3815,
      "step": 14
    },
    {
      "epoch": 0.15306122448979592,
      "grad_norm": 0.9442039132118225,
      "learning_rate": 9.494758705426978e-06,
      "loss": 1.0305,
      "step": 15
    },
    {
      "epoch": 0.16326530612244897,
      "grad_norm": 1.022035837173462,
      "learning_rate": 9.421477453650118e-06,
      "loss": 1.3317,
      "step": 16
    },
    {
      "epoch": 0.17346938775510204,
      "grad_norm": 1.0751738548278809,
      "learning_rate": 9.343558682590757e-06,
      "loss": 1.323,
      "step": 17
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": 1.0566898584365845,
      "learning_rate": 9.261084118279846e-06,
      "loss": 1.5154,
      "step": 18
    },
    {
      "epoch": 0.19387755102040816,
      "grad_norm": 1.0009628534317017,
      "learning_rate": 9.174140265146356e-06,
      "loss": 1.2371,
      "step": 19
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 0.9259373545646667,
      "learning_rate": 9.082818315286054e-06,
      "loss": 1.042,
      "step": 20
    },
    {
      "epoch": 0.20408163265306123,
      "eval_loss": 0.9510844945907593,
      "eval_runtime": 0.2601,
      "eval_samples_per_second": 30.76,
      "eval_steps_per_second": 3.845,
      "step": 20
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.9078995585441589,
      "learning_rate": 8.987214052813605e-06,
      "loss": 1.07,
      "step": 21
    },
    {
      "epoch": 0.22448979591836735,
      "grad_norm": 0.9820942282676697,
      "learning_rate": 8.887427753398249e-06,
      "loss": 1.2102,
      "step": 22
    },
    {
      "epoch": 0.23469387755102042,
      "grad_norm": 1.0587691068649292,
      "learning_rate": 8.783564079088478e-06,
      "loss": 1.0913,
      "step": 23
    },
    {
      "epoch": 0.24489795918367346,
      "grad_norm": 0.8137860894203186,
      "learning_rate": 8.675731968536004e-06,
      "loss": 0.8923,
      "step": 24
    },
    {
      "epoch": 0.25510204081632654,
      "grad_norm": 0.9471170902252197,
      "learning_rate": 8.564044522734147e-06,
      "loss": 1.1723,
      "step": 25
    },
    {
      "epoch": 0.2653061224489796,
      "grad_norm": 0.8885685205459595,
      "learning_rate": 8.448618886390523e-06,
      "loss": 1.1206,
      "step": 26
    },
    {
      "epoch": 0.2755102040816326,
      "grad_norm": 0.8613781332969666,
      "learning_rate": 8.329576125058406e-06,
      "loss": 1.1261,
      "step": 27
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.9407626986503601,
      "learning_rate": 8.207041098155701e-06,
      "loss": 1.2727,
      "step": 28
    },
    {
      "epoch": 0.29591836734693877,
      "grad_norm": 0.8954651951789856,
      "learning_rate": 8.081142328004638e-06,
      "loss": 1.2197,
      "step": 29
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": 0.8864608407020569,
      "learning_rate": 7.952011865029614e-06,
      "loss": 1.1236,
      "step": 30
    },
    {
      "epoch": 0.30612244897959184,
      "eval_loss": 0.9308969974517822,
      "eval_runtime": 0.2599,
      "eval_samples_per_second": 30.775,
      "eval_steps_per_second": 3.847,
      "step": 30
    },
    {
      "epoch": 0.3163265306122449,
      "grad_norm": 0.8076571226119995,
      "learning_rate": 7.819785149254534e-06,
      "loss": 0.9098,
      "step": 31
    },
    {
      "epoch": 0.32653061224489793,
      "grad_norm": 0.8902921080589294,
      "learning_rate": 7.68460086824492e-06,
      "loss": 1.0564,
      "step": 32
    },
    {
      "epoch": 0.336734693877551,
      "grad_norm": 0.9010928273200989,
      "learning_rate": 7.546600811643816e-06,
      "loss": 1.0174,
      "step": 33
    },
    {
      "epoch": 0.3469387755102041,
      "grad_norm": 0.8672641515731812,
      "learning_rate": 7.405929722454026e-06,
      "loss": 1.1875,
      "step": 34
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.7878830432891846,
      "learning_rate": 7.262735145222696e-06,
      "loss": 0.9453,
      "step": 35
    },
    {
      "epoch": 0.3673469387755102,
      "grad_norm": 0.85221266746521,
      "learning_rate": 7.117167271287453e-06,
      "loss": 1.0147,
      "step": 36
    },
    {
      "epoch": 0.37755102040816324,
      "grad_norm": 0.8591777682304382,
      "learning_rate": 6.969378781246436e-06,
      "loss": 0.995,
      "step": 37
    },
    {
      "epoch": 0.3877551020408163,
      "grad_norm": 0.9585978388786316,
      "learning_rate": 6.819524684817439e-06,
      "loss": 0.9636,
      "step": 38
    },
    {
      "epoch": 0.3979591836734694,
      "grad_norm": 0.9561256170272827,
      "learning_rate": 6.667762158254104e-06,
      "loss": 1.1511,
      "step": 39
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 0.9076462388038635,
      "learning_rate": 6.514250379489754e-06,
      "loss": 1.238,
      "step": 40
    },
    {
      "epoch": 0.40816326530612246,
      "eval_loss": 0.9210565090179443,
      "eval_runtime": 0.26,
      "eval_samples_per_second": 30.764,
      "eval_steps_per_second": 3.846,
      "step": 40
    },
    {
      "epoch": 0.41836734693877553,
      "grad_norm": 0.779888391494751,
      "learning_rate": 6.3591503611817155e-06,
      "loss": 0.927,
      "step": 41
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.826004147529602,
      "learning_rate": 6.202624781831269e-06,
      "loss": 1.1018,
      "step": 42
    },
    {
      "epoch": 0.4387755102040816,
      "grad_norm": 0.8641281127929688,
      "learning_rate": 6.044837815156377e-06,
      "loss": 0.938,
      "step": 43
    },
    {
      "epoch": 0.4489795918367347,
      "grad_norm": 0.896023154258728,
      "learning_rate": 5.885954957896115e-06,
      "loss": 1.0764,
      "step": 44
    },
    {
      "epoch": 0.45918367346938777,
      "grad_norm": 0.9072222709655762,
      "learning_rate": 5.726142856227453e-06,
      "loss": 1.2437,
      "step": 45
    },
    {
      "epoch": 0.46938775510204084,
      "grad_norm": 0.7771997451782227,
      "learning_rate": 5.5655691309764225e-06,
      "loss": 1.0162,
      "step": 46
    },
    {
      "epoch": 0.47959183673469385,
      "grad_norm": 0.8864902853965759,
      "learning_rate": 5.404402201807022e-06,
      "loss": 1.1398,
      "step": 47
    },
    {
      "epoch": 0.4897959183673469,
      "grad_norm": 0.702143669128418,
      "learning_rate": 5.242811110572243e-06,
      "loss": 0.8257,
      "step": 48
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.7524011731147766,
      "learning_rate": 5.080965344012509e-06,
      "loss": 0.9318,
      "step": 49
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 0.7233298420906067,
      "learning_rate": 4.919034655987493e-06,
      "loss": 0.8178,
      "step": 50
    },
    {
      "epoch": 0.5102040816326531,
      "eval_loss": 0.9119246006011963,
      "eval_runtime": 0.2595,
      "eval_samples_per_second": 30.823,
      "eval_steps_per_second": 3.853,
      "step": 50
    },
    {
      "epoch": 0.5204081632653061,
      "grad_norm": 0.8432551622390747,
      "learning_rate": 4.757188889427761e-06,
      "loss": 1.0659,
      "step": 51
    },
    {
      "epoch": 0.5306122448979592,
      "grad_norm": 0.772146999835968,
      "learning_rate": 4.59559779819298e-06,
      "loss": 1.0016,
      "step": 52
    },
    {
      "epoch": 0.5408163265306123,
      "grad_norm": 0.8176573514938354,
      "learning_rate": 4.434430869023579e-06,
      "loss": 1.0541,
      "step": 53
    },
    {
      "epoch": 0.5510204081632653,
      "grad_norm": 0.730097770690918,
      "learning_rate": 4.27385714377255e-06,
      "loss": 0.8034,
      "step": 54
    },
    {
      "epoch": 0.5612244897959183,
      "grad_norm": 0.7190036177635193,
      "learning_rate": 4.1140450421038865e-06,
      "loss": 0.8833,
      "step": 55
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.732845664024353,
      "learning_rate": 3.955162184843625e-06,
      "loss": 0.8837,
      "step": 56
    },
    {
      "epoch": 0.5816326530612245,
      "grad_norm": 0.770910918712616,
      "learning_rate": 3.7973752181687336e-06,
      "loss": 1.0044,
      "step": 57
    },
    {
      "epoch": 0.5918367346938775,
      "grad_norm": 0.7635073065757751,
      "learning_rate": 3.6408496388182857e-06,
      "loss": 0.9338,
      "step": 58
    },
    {
      "epoch": 0.6020408163265306,
      "grad_norm": 0.8665493130683899,
      "learning_rate": 3.4857496205102475e-06,
      "loss": 1.1868,
      "step": 59
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 0.7718795537948608,
      "learning_rate": 3.3322378417458985e-06,
      "loss": 0.951,
      "step": 60
    },
    {
      "epoch": 0.6122448979591837,
      "eval_loss": 0.9065544605255127,
      "eval_runtime": 0.2595,
      "eval_samples_per_second": 30.831,
      "eval_steps_per_second": 3.854,
      "step": 60
    },
    {
      "epoch": 0.6224489795918368,
      "grad_norm": 0.932878851890564,
      "learning_rate": 3.180475315182563e-06,
      "loss": 1.2871,
      "step": 61
    },
    {
      "epoch": 0.6326530612244898,
      "grad_norm": 0.8946093320846558,
      "learning_rate": 3.0306212187535653e-06,
      "loss": 0.9965,
      "step": 62
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.7468050718307495,
      "learning_rate": 2.882832728712551e-06,
      "loss": 0.8174,
      "step": 63
    },
    {
      "epoch": 0.6530612244897959,
      "grad_norm": 0.7200383543968201,
      "learning_rate": 2.7372648547773063e-06,
      "loss": 0.9641,
      "step": 64
    },
    {
      "epoch": 0.6632653061224489,
      "grad_norm": 0.7331112623214722,
      "learning_rate": 2.594070277545975e-06,
      "loss": 0.8579,
      "step": 65
    },
    {
      "epoch": 0.673469387755102,
      "grad_norm": 0.713594377040863,
      "learning_rate": 2.4533991883561868e-06,
      "loss": 0.872,
      "step": 66
    },
    {
      "epoch": 0.6836734693877551,
      "grad_norm": 0.8468301296234131,
      "learning_rate": 2.315399131755081e-06,
      "loss": 1.1029,
      "step": 67
    },
    {
      "epoch": 0.6938775510204082,
      "grad_norm": 0.7141689658164978,
      "learning_rate": 2.1802148507454675e-06,
      "loss": 0.8727,
      "step": 68
    },
    {
      "epoch": 0.7040816326530612,
      "grad_norm": 0.7567382454872131,
      "learning_rate": 2.0479881349703885e-06,
      "loss": 0.9211,
      "step": 69
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.7020692825317383,
      "learning_rate": 1.9188576719953635e-06,
      "loss": 0.7869,
      "step": 70
    },
    {
      "epoch": 0.7142857142857143,
      "eval_loss": 0.9032285809516907,
      "eval_runtime": 0.2594,
      "eval_samples_per_second": 30.835,
      "eval_steps_per_second": 3.854,
      "step": 70
    },
    {
      "epoch": 0.7244897959183674,
      "grad_norm": 0.853243350982666,
      "learning_rate": 1.7929589018443016e-06,
      "loss": 1.1762,
      "step": 71
    },
    {
      "epoch": 0.7346938775510204,
      "grad_norm": 0.8287662267684937,
      "learning_rate": 1.6704238749415958e-06,
      "loss": 1.0489,
      "step": 72
    },
    {
      "epoch": 0.7448979591836735,
      "grad_norm": 0.6973975300788879,
      "learning_rate": 1.5513811136094786e-06,
      "loss": 0.8249,
      "step": 73
    },
    {
      "epoch": 0.7551020408163265,
      "grad_norm": 0.8150553703308105,
      "learning_rate": 1.4359554772658551e-06,
      "loss": 1.0011,
      "step": 74
    },
    {
      "epoch": 0.7653061224489796,
      "grad_norm": 0.7974684238433838,
      "learning_rate": 1.3242680314639995e-06,
      "loss": 1.0774,
      "step": 75
    },
    {
      "epoch": 0.7755102040816326,
      "grad_norm": 0.8233014941215515,
      "learning_rate": 1.2164359209115235e-06,
      "loss": 1.1492,
      "step": 76
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.8305495977401733,
      "learning_rate": 1.1125722466017547e-06,
      "loss": 1.1329,
      "step": 77
    },
    {
      "epoch": 0.7959183673469388,
      "grad_norm": 0.7523290514945984,
      "learning_rate": 1.012785947186397e-06,
      "loss": 0.9375,
      "step": 78
    },
    {
      "epoch": 0.8061224489795918,
      "grad_norm": 0.8073377013206482,
      "learning_rate": 9.171816847139447e-07,
      "loss": 1.0936,
      "step": 79
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.8685086965560913,
      "learning_rate": 8.258597348536452e-07,
      "loss": 1.2626,
      "step": 80
    },
    {
      "epoch": 0.8163265306122449,
      "eval_loss": 0.9012266397476196,
      "eval_runtime": 0.2595,
      "eval_samples_per_second": 30.833,
      "eval_steps_per_second": 3.854,
      "step": 80
    },
    {
      "epoch": 0.826530612244898,
      "grad_norm": 0.877649188041687,
      "learning_rate": 7.389158817201541e-07,
      "loss": 1.3479,
      "step": 81
    },
    {
      "epoch": 0.8367346938775511,
      "grad_norm": 0.7652468085289001,
      "learning_rate": 6.564413174092443e-07,
      "loss": 0.9699,
      "step": 82
    },
    {
      "epoch": 0.8469387755102041,
      "grad_norm": 0.8436093926429749,
      "learning_rate": 5.785225463498828e-07,
      "loss": 1.1757,
      "step": 83
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.8732744455337524,
      "learning_rate": 5.05241294573024e-07,
      "loss": 1.1877,
      "step": 84
    },
    {
      "epoch": 0.8673469387755102,
      "grad_norm": 0.7433239221572876,
      "learning_rate": 4.3667442399229985e-07,
      "loss": 0.9878,
      "step": 85
    },
    {
      "epoch": 0.8775510204081632,
      "grad_norm": 0.8225981593132019,
      "learning_rate": 3.728938517864794e-07,
      "loss": 1.145,
      "step": 86
    },
    {
      "epoch": 0.8877551020408163,
      "grad_norm": 0.8591734170913696,
      "learning_rate": 3.1396647496828245e-07,
      "loss": 1.128,
      "step": 87
    },
    {
      "epoch": 0.8979591836734694,
      "grad_norm": 0.7502849102020264,
      "learning_rate": 2.599541002186479e-07,
      "loss": 0.9712,
      "step": 88
    },
    {
      "epoch": 0.9081632653061225,
      "grad_norm": 0.7429087162017822,
      "learning_rate": 2.109133790600648e-07,
      "loss": 0.9278,
      "step": 89
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 0.8890379667282104,
      "learning_rate": 1.6689574843694433e-07,
      "loss": 1.2385,
      "step": 90
    },
    {
      "epoch": 0.9183673469387755,
      "eval_loss": 0.9008685350418091,
      "eval_runtime": 0.2594,
      "eval_samples_per_second": 30.837,
      "eval_steps_per_second": 3.855,
      "step": 90
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 0.9237527251243591,
      "learning_rate": 1.2794737676536993e-07,
      "loss": 1.0866,
      "step": 91
    },
    {
      "epoch": 0.9387755102040817,
      "grad_norm": 0.8110918402671814,
      "learning_rate": 9.410911550880474e-08,
      "loss": 1.0466,
      "step": 92
    },
    {
      "epoch": 0.9489795918367347,
      "grad_norm": 0.7134807109832764,
      "learning_rate": 6.54164563305465e-08,
      "loss": 0.8466,
      "step": 93
    },
    {
      "epoch": 0.9591836734693877,
      "grad_norm": 0.8616638779640198,
      "learning_rate": 4.189949386787462e-08,
      "loss": 1.1521,
      "step": 94
    },
    {
      "epoch": 0.9693877551020408,
      "grad_norm": 0.9501765966415405,
      "learning_rate": 2.358289416693027e-08,
      "loss": 1.1505,
      "step": 95
    },
    {
      "epoch": 0.9795918367346939,
      "grad_norm": 0.7652553915977478,
      "learning_rate": 1.0485868811441757e-08,
      "loss": 0.9055,
      "step": 96
    },
    {
      "epoch": 0.9897959183673469,
      "grad_norm": 0.862601637840271,
      "learning_rate": 2.6221547724253337e-09,
      "loss": 1.0224,
      "step": 97
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.712780773639679,
      "learning_rate": 0.0,
      "loss": 0.9534,
      "step": 98
    },
    {
      "epoch": 1.0,
      "step": 98,
      "total_flos": 3.270639151271117e+16,
      "train_loss": 1.0707874030483013,
      "train_runtime": 199.5779,
      "train_samples_per_second": 3.908,
      "train_steps_per_second": 0.491
    }
  ],
  "logging_steps": 1,
  "max_steps": 98,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 98,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.270639151271117e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}