{
"best_metric": 0.0367230549454689,
"best_model_checkpoint": "saves/psy-course/Llama3-OpenBioLLM-8B/train/fold9/checkpoint-1050",
"epoch": 4.997121473805412,
"eval_steps": 50,
"global_step": 3255,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01535213970447131,
"grad_norm": 6.4076361656188965,
"learning_rate": 3.067484662576687e-06,
"loss": 1.3982,
"step": 10
},
{
"epoch": 0.03070427940894262,
"grad_norm": 6.215030193328857,
"learning_rate": 6.134969325153374e-06,
"loss": 1.1812,
"step": 20
},
{
"epoch": 0.04605641911341393,
"grad_norm": 4.46425724029541,
"learning_rate": 9.202453987730062e-06,
"loss": 1.2206,
"step": 30
},
{
"epoch": 0.06140855881788524,
"grad_norm": 2.6584370136260986,
"learning_rate": 1.2269938650306748e-05,
"loss": 0.801,
"step": 40
},
{
"epoch": 0.07676069852235655,
"grad_norm": 2.24800705909729,
"learning_rate": 1.5337423312883436e-05,
"loss": 0.5046,
"step": 50
},
{
"epoch": 0.07676069852235655,
"eval_loss": 0.30626675486564636,
"eval_runtime": 186.0533,
"eval_samples_per_second": 6.229,
"eval_steps_per_second": 6.229,
"step": 50
},
{
"epoch": 0.09211283822682786,
"grad_norm": 1.3170571327209473,
"learning_rate": 1.8404907975460123e-05,
"loss": 0.2991,
"step": 60
},
{
"epoch": 0.10746497793129918,
"grad_norm": 1.3401583433151245,
"learning_rate": 2.1472392638036813e-05,
"loss": 0.2284,
"step": 70
},
{
"epoch": 0.12281711763577048,
"grad_norm": 1.7484021186828613,
"learning_rate": 2.4539877300613496e-05,
"loss": 0.1355,
"step": 80
},
{
"epoch": 0.1381692573402418,
"grad_norm": 1.4553366899490356,
"learning_rate": 2.7607361963190186e-05,
"loss": 0.1286,
"step": 90
},
{
"epoch": 0.1535213970447131,
"grad_norm": 1.0929967164993286,
"learning_rate": 3.067484662576687e-05,
"loss": 0.1059,
"step": 100
},
{
"epoch": 0.1535213970447131,
"eval_loss": 0.08417145907878876,
"eval_runtime": 185.0702,
"eval_samples_per_second": 6.262,
"eval_steps_per_second": 6.262,
"step": 100
},
{
"epoch": 0.16887353674918443,
"grad_norm": 0.7335715293884277,
"learning_rate": 3.3742331288343556e-05,
"loss": 0.0726,
"step": 110
},
{
"epoch": 0.18422567645365573,
"grad_norm": 1.5828769207000732,
"learning_rate": 3.6809815950920246e-05,
"loss": 0.0767,
"step": 120
},
{
"epoch": 0.19957781615812703,
"grad_norm": 1.2771799564361572,
"learning_rate": 3.987730061349693e-05,
"loss": 0.0823,
"step": 130
},
{
"epoch": 0.21492995586259836,
"grad_norm": 0.6682676076889038,
"learning_rate": 4.2944785276073626e-05,
"loss": 0.0574,
"step": 140
},
{
"epoch": 0.23028209556706966,
"grad_norm": 1.404420256614685,
"learning_rate": 4.601226993865031e-05,
"loss": 0.0783,
"step": 150
},
{
"epoch": 0.23028209556706966,
"eval_loss": 0.06952261179685593,
"eval_runtime": 185.3588,
"eval_samples_per_second": 6.253,
"eval_steps_per_second": 6.253,
"step": 150
},
{
"epoch": 0.24563423527154096,
"grad_norm": 1.2250556945800781,
"learning_rate": 4.907975460122699e-05,
"loss": 0.0974,
"step": 160
},
{
"epoch": 0.2609863749760123,
"grad_norm": 0.7745575308799744,
"learning_rate": 5.214723926380368e-05,
"loss": 0.0751,
"step": 170
},
{
"epoch": 0.2763385146804836,
"grad_norm": 1.1263943910598755,
"learning_rate": 5.521472392638037e-05,
"loss": 0.0761,
"step": 180
},
{
"epoch": 0.2916906543849549,
"grad_norm": 2.072108507156372,
"learning_rate": 5.8282208588957056e-05,
"loss": 0.0629,
"step": 190
},
{
"epoch": 0.3070427940894262,
"grad_norm": 0.7699011564254761,
"learning_rate": 6.134969325153375e-05,
"loss": 0.0635,
"step": 200
},
{
"epoch": 0.3070427940894262,
"eval_loss": 0.05947184935212135,
"eval_runtime": 185.1277,
"eval_samples_per_second": 6.261,
"eval_steps_per_second": 6.261,
"step": 200
},
{
"epoch": 0.3223949337938975,
"grad_norm": 0.9900126457214355,
"learning_rate": 6.441717791411042e-05,
"loss": 0.06,
"step": 210
},
{
"epoch": 0.33774707349836885,
"grad_norm": 0.8738503456115723,
"learning_rate": 6.748466257668711e-05,
"loss": 0.0491,
"step": 220
},
{
"epoch": 0.35309921320284016,
"grad_norm": 0.43959110975265503,
"learning_rate": 7.055214723926382e-05,
"loss": 0.0828,
"step": 230
},
{
"epoch": 0.36845135290731146,
"grad_norm": 0.8098151683807373,
"learning_rate": 7.361963190184049e-05,
"loss": 0.0633,
"step": 240
},
{
"epoch": 0.38380349261178276,
"grad_norm": 1.4580163955688477,
"learning_rate": 7.668711656441718e-05,
"loss": 0.075,
"step": 250
},
{
"epoch": 0.38380349261178276,
"eval_loss": 0.053019050508737564,
"eval_runtime": 184.7751,
"eval_samples_per_second": 6.272,
"eval_steps_per_second": 6.272,
"step": 250
},
{
"epoch": 0.39915563231625406,
"grad_norm": 0.49057313799858093,
"learning_rate": 7.975460122699386e-05,
"loss": 0.0613,
"step": 260
},
{
"epoch": 0.41450777202072536,
"grad_norm": 0.6311978101730347,
"learning_rate": 8.282208588957055e-05,
"loss": 0.0652,
"step": 270
},
{
"epoch": 0.4298599117251967,
"grad_norm": 0.8171926736831665,
"learning_rate": 8.588957055214725e-05,
"loss": 0.0692,
"step": 280
},
{
"epoch": 0.445212051429668,
"grad_norm": 0.6747433543205261,
"learning_rate": 8.895705521472393e-05,
"loss": 0.051,
"step": 290
},
{
"epoch": 0.4605641911341393,
"grad_norm": 0.7163071036338806,
"learning_rate": 9.202453987730062e-05,
"loss": 0.065,
"step": 300
},
{
"epoch": 0.4605641911341393,
"eval_loss": 0.0490945465862751,
"eval_runtime": 183.831,
"eval_samples_per_second": 6.305,
"eval_steps_per_second": 6.305,
"step": 300
},
{
"epoch": 0.4759163308386106,
"grad_norm": 0.7852542400360107,
"learning_rate": 9.50920245398773e-05,
"loss": 0.0461,
"step": 310
},
{
"epoch": 0.4912684705430819,
"grad_norm": 0.3825742304325104,
"learning_rate": 9.815950920245399e-05,
"loss": 0.0471,
"step": 320
},
{
"epoch": 0.5066206102475532,
"grad_norm": 0.6208099722862244,
"learning_rate": 9.999953982785432e-05,
"loss": 0.0541,
"step": 330
},
{
"epoch": 0.5219727499520246,
"grad_norm": 0.48451879620552063,
"learning_rate": 9.999436298849151e-05,
"loss": 0.0474,
"step": 340
},
{
"epoch": 0.5373248896564958,
"grad_norm": 0.24559588730335236,
"learning_rate": 9.998343469212352e-05,
"loss": 0.0474,
"step": 350
},
{
"epoch": 0.5373248896564958,
"eval_loss": 0.04777354747056961,
"eval_runtime": 184.3237,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 6.288,
"step": 350
},
{
"epoch": 0.5526770293609672,
"grad_norm": 0.4189242124557495,
"learning_rate": 9.996675619596465e-05,
"loss": 0.047,
"step": 360
},
{
"epoch": 0.5680291690654385,
"grad_norm": 0.7412087917327881,
"learning_rate": 9.99443294187443e-05,
"loss": 0.0571,
"step": 370
},
{
"epoch": 0.5833813087699098,
"grad_norm": 0.2214265614748001,
"learning_rate": 9.991615694048621e-05,
"loss": 0.0483,
"step": 380
},
{
"epoch": 0.5987334484743811,
"grad_norm": 0.5499956011772156,
"learning_rate": 9.988224200221172e-05,
"loss": 0.057,
"step": 390
},
{
"epoch": 0.6140855881788524,
"grad_norm": 0.41717803478240967,
"learning_rate": 9.984258850556693e-05,
"loss": 0.0461,
"step": 400
},
{
"epoch": 0.6140855881788524,
"eval_loss": 0.04933024197816849,
"eval_runtime": 184.7547,
"eval_samples_per_second": 6.273,
"eval_steps_per_second": 6.273,
"step": 400
},
{
"epoch": 0.6294377278833237,
"grad_norm": 0.7418667674064636,
"learning_rate": 9.979720101237375e-05,
"loss": 0.0434,
"step": 410
},
{
"epoch": 0.644789867587795,
"grad_norm": 0.38084396719932556,
"learning_rate": 9.974608474410512e-05,
"loss": 0.0511,
"step": 420
},
{
"epoch": 0.6601420072922664,
"grad_norm": 0.6233692169189453,
"learning_rate": 9.968924558128445e-05,
"loss": 0.0506,
"step": 430
},
{
"epoch": 0.6754941469967377,
"grad_norm": 0.4003570079803467,
"learning_rate": 9.962669006280894e-05,
"loss": 0.0421,
"step": 440
},
{
"epoch": 0.690846286701209,
"grad_norm": 0.4964096248149872,
"learning_rate": 9.95584253851974e-05,
"loss": 0.0533,
"step": 450
},
{
"epoch": 0.690846286701209,
"eval_loss": 0.053962595760822296,
"eval_runtime": 184.4806,
"eval_samples_per_second": 6.283,
"eval_steps_per_second": 6.283,
"step": 450
},
{
"epoch": 0.7061984264056803,
"grad_norm": 0.21844400465488434,
"learning_rate": 9.948445940176243e-05,
"loss": 0.0548,
"step": 460
},
{
"epoch": 0.7215505661101516,
"grad_norm": 0.30259862542152405,
"learning_rate": 9.940480062170679e-05,
"loss": 0.0433,
"step": 470
},
{
"epoch": 0.7369027058146229,
"grad_norm": 0.26537519693374634,
"learning_rate": 9.931945820914462e-05,
"loss": 0.0538,
"step": 480
},
{
"epoch": 0.7522548455190943,
"grad_norm": 0.48046591877937317,
"learning_rate": 9.922844198204715e-05,
"loss": 0.0429,
"step": 490
},
{
"epoch": 0.7676069852235655,
"grad_norm": 0.5965185165405273,
"learning_rate": 9.913176241111319e-05,
"loss": 0.048,
"step": 500
},
{
"epoch": 0.7676069852235655,
"eval_loss": 0.045701391994953156,
"eval_runtime": 184.3585,
"eval_samples_per_second": 6.287,
"eval_steps_per_second": 6.287,
"step": 500
},
{
"epoch": 0.7829591249280369,
"grad_norm": 0.6371860504150391,
"learning_rate": 9.902943061856456e-05,
"loss": 0.0381,
"step": 510
},
{
"epoch": 0.7983112646325081,
"grad_norm": 0.4156785011291504,
"learning_rate": 9.892145837686657e-05,
"loss": 0.0628,
"step": 520
},
{
"epoch": 0.8136634043369795,
"grad_norm": 0.38985827565193176,
"learning_rate": 9.880785810737378e-05,
"loss": 0.0577,
"step": 530
},
{
"epoch": 0.8290155440414507,
"grad_norm": 0.16884198784828186,
"learning_rate": 9.868864287890083e-05,
"loss": 0.0442,
"step": 540
},
{
"epoch": 0.8443676837459221,
"grad_norm": 0.39037224650382996,
"learning_rate": 9.856382640621917e-05,
"loss": 0.0694,
"step": 550
},
{
"epoch": 0.8443676837459221,
"eval_loss": 0.047504179179668427,
"eval_runtime": 179.3233,
"eval_samples_per_second": 6.463,
"eval_steps_per_second": 6.463,
"step": 550
},
{
"epoch": 0.8597198234503934,
"grad_norm": 0.33800429105758667,
"learning_rate": 9.84334230484792e-05,
"loss": 0.0381,
"step": 560
},
{
"epoch": 0.8750719631548647,
"grad_norm": 0.23129704594612122,
"learning_rate": 9.82974478075583e-05,
"loss": 0.044,
"step": 570
},
{
"epoch": 0.890424102859336,
"grad_norm": 0.40697339177131653,
"learning_rate": 9.815591632633509e-05,
"loss": 0.0388,
"step": 580
},
{
"epoch": 0.9057762425638073,
"grad_norm": 0.21864917874336243,
"learning_rate": 9.800884488688985e-05,
"loss": 0.044,
"step": 590
},
{
"epoch": 0.9211283822682786,
"grad_norm": 0.34791189432144165,
"learning_rate": 9.785625040863124e-05,
"loss": 0.0396,
"step": 600
},
{
"epoch": 0.9211283822682786,
"eval_loss": 0.04156533628702164,
"eval_runtime": 183.4271,
"eval_samples_per_second": 6.319,
"eval_steps_per_second": 6.319,
"step": 600
},
{
"epoch": 0.93648052197275,
"grad_norm": 0.3301123082637787,
"learning_rate": 9.769815044635005e-05,
"loss": 0.036,
"step": 610
},
{
"epoch": 0.9518326616772212,
"grad_norm": 0.43030592799186707,
"learning_rate": 9.753456318819946e-05,
"loss": 0.0463,
"step": 620
},
{
"epoch": 0.9671848013816926,
"grad_norm": 0.6103232502937317,
"learning_rate": 9.736550745360292e-05,
"loss": 0.0425,
"step": 630
},
{
"epoch": 0.9825369410861639,
"grad_norm": 0.21440726518630981,
"learning_rate": 9.719100269108872e-05,
"loss": 0.043,
"step": 640
},
{
"epoch": 0.9978890807906352,
"grad_norm": 0.2889581322669983,
"learning_rate": 9.701106897605304e-05,
"loss": 0.0412,
"step": 650
},
{
"epoch": 0.9978890807906352,
"eval_loss": 0.03858696296811104,
"eval_runtime": 183.6371,
"eval_samples_per_second": 6.311,
"eval_steps_per_second": 6.311,
"step": 650
},
{
"epoch": 1.0132412204951065,
"grad_norm": 0.25601544976234436,
"learning_rate": 9.682572700845006e-05,
"loss": 0.0308,
"step": 660
},
{
"epoch": 1.0285933601995778,
"grad_norm": 0.3059068024158478,
"learning_rate": 9.663499811041082e-05,
"loss": 0.0335,
"step": 670
},
{
"epoch": 1.0439454999040492,
"grad_norm": 0.39541324973106384,
"learning_rate": 9.643890422379018e-05,
"loss": 0.0363,
"step": 680
},
{
"epoch": 1.0592976396085205,
"grad_norm": 0.5301039814949036,
"learning_rate": 9.623746790764261e-05,
"loss": 0.0306,
"step": 690
},
{
"epoch": 1.0746497793129917,
"grad_norm": 0.4379103183746338,
"learning_rate": 9.603071233562695e-05,
"loss": 0.0339,
"step": 700
},
{
"epoch": 1.0746497793129917,
"eval_loss": 0.04569621756672859,
"eval_runtime": 184.1042,
"eval_samples_per_second": 6.295,
"eval_steps_per_second": 6.295,
"step": 700
},
{
"epoch": 1.090001919017463,
"grad_norm": 0.12664569914340973,
"learning_rate": 9.581866129334044e-05,
"loss": 0.0407,
"step": 710
},
{
"epoch": 1.1053540587219344,
"grad_norm": 0.3708535432815552,
"learning_rate": 9.560133917558242e-05,
"loss": 0.0452,
"step": 720
},
{
"epoch": 1.1207061984264057,
"grad_norm": 0.3598388135433197,
"learning_rate": 9.537877098354786e-05,
"loss": 0.0334,
"step": 730
},
{
"epoch": 1.136058338130877,
"grad_norm": 0.2653886079788208,
"learning_rate": 9.51509823219512e-05,
"loss": 0.0274,
"step": 740
},
{
"epoch": 1.1514104778353482,
"grad_norm": 0.32400548458099365,
"learning_rate": 9.491799939608065e-05,
"loss": 0.0357,
"step": 750
},
{
"epoch": 1.1514104778353482,
"eval_loss": 0.04340927302837372,
"eval_runtime": 183.8858,
"eval_samples_per_second": 6.303,
"eval_steps_per_second": 6.303,
"step": 750
},
{
"epoch": 1.1667626175398196,
"grad_norm": 0.3891257345676422,
"learning_rate": 9.467984900878364e-05,
"loss": 0.0303,
"step": 760
},
{
"epoch": 1.182114757244291,
"grad_norm": 0.25605306029319763,
"learning_rate": 9.443655855738321e-05,
"loss": 0.0269,
"step": 770
},
{
"epoch": 1.1974668969487623,
"grad_norm": 0.3143906593322754,
"learning_rate": 9.41881560305262e-05,
"loss": 0.0367,
"step": 780
},
{
"epoch": 1.2128190366532334,
"grad_norm": 0.41081351041793823,
"learning_rate": 9.393467000496344e-05,
"loss": 0.0384,
"step": 790
},
{
"epoch": 1.2281711763577048,
"grad_norm": 0.35828566551208496,
"learning_rate": 9.367612964226218e-05,
"loss": 0.0336,
"step": 800
},
{
"epoch": 1.2281711763577048,
"eval_loss": 0.04084751009941101,
"eval_runtime": 183.4176,
"eval_samples_per_second": 6.319,
"eval_steps_per_second": 6.319,
"step": 800
},
{
"epoch": 1.2435233160621761,
"grad_norm": 0.2005142867565155,
"learning_rate": 9.341256468545122e-05,
"loss": 0.0262,
"step": 810
},
{
"epoch": 1.2588754557666475,
"grad_norm": 0.47820189595222473,
"learning_rate": 9.314400545559934e-05,
"loss": 0.0292,
"step": 820
},
{
"epoch": 1.2742275954711189,
"grad_norm": 0.3881331980228424,
"learning_rate": 9.287048284832698e-05,
"loss": 0.0352,
"step": 830
},
{
"epoch": 1.28957973517559,
"grad_norm": 0.4042621850967407,
"learning_rate": 9.2592028330252e-05,
"loss": 0.0328,
"step": 840
},
{
"epoch": 1.3049318748800613,
"grad_norm": 0.19656337797641754,
"learning_rate": 9.230867393536972e-05,
"loss": 0.0342,
"step": 850
},
{
"epoch": 1.3049318748800613,
"eval_loss": 0.041351333260536194,
"eval_runtime": 183.3044,
"eval_samples_per_second": 6.323,
"eval_steps_per_second": 6.323,
"step": 850
},
{
"epoch": 1.3202840145845327,
"grad_norm": 0.33282142877578735,
"learning_rate": 9.202045226136757e-05,
"loss": 0.0308,
"step": 860
},
{
"epoch": 1.335636154289004,
"grad_norm": 0.1371268481016159,
"learning_rate": 9.172739646587509e-05,
"loss": 0.0236,
"step": 870
},
{
"epoch": 1.3509882939934754,
"grad_norm": 0.1828700453042984,
"learning_rate": 9.142954026264931e-05,
"loss": 0.0305,
"step": 880
},
{
"epoch": 1.3663404336979466,
"grad_norm": 0.22593112289905548,
"learning_rate": 9.112691791769634e-05,
"loss": 0.0259,
"step": 890
},
{
"epoch": 1.381692573402418,
"grad_norm": 0.4628385603427887,
"learning_rate": 9.081956424532926e-05,
"loss": 0.0307,
"step": 900
},
{
"epoch": 1.381692573402418,
"eval_loss": 0.04069655388593674,
"eval_runtime": 183.3198,
"eval_samples_per_second": 6.322,
"eval_steps_per_second": 6.322,
"step": 900
},
{
"epoch": 1.3970447131068893,
"grad_norm": 0.2739456593990326,
"learning_rate": 9.050751460416305e-05,
"loss": 0.0351,
"step": 910
},
{
"epoch": 1.4123968528113606,
"grad_norm": 0.17110799252986908,
"learning_rate": 9.019080489304685e-05,
"loss": 0.0365,
"step": 920
},
{
"epoch": 1.427748992515832,
"grad_norm": 0.43909764289855957,
"learning_rate": 8.986947154693408e-05,
"loss": 0.0514,
"step": 930
},
{
"epoch": 1.4431011322203031,
"grad_norm": 0.3460999131202698,
"learning_rate": 8.954355153269088e-05,
"loss": 0.0431,
"step": 940
},
{
"epoch": 1.4584532719247745,
"grad_norm": 0.255986750125885,
"learning_rate": 8.921308234484336e-05,
"loss": 0.0312,
"step": 950
},
{
"epoch": 1.4584532719247745,
"eval_loss": 0.0378691703081131,
"eval_runtime": 183.3363,
"eval_samples_per_second": 6.322,
"eval_steps_per_second": 6.322,
"step": 950
},
{
"epoch": 1.4738054116292458,
"grad_norm": 0.3084361255168915,
"learning_rate": 8.887810200126419e-05,
"loss": 0.0333,
"step": 960
},
{
"epoch": 1.4891575513337172,
"grad_norm": 0.404434472322464,
"learning_rate": 8.853864903879889e-05,
"loss": 0.0381,
"step": 970
},
{
"epoch": 1.5045096910381885,
"grad_norm": 0.3657182455062866,
"learning_rate": 8.81947625088325e-05,
"loss": 0.0371,
"step": 980
},
{
"epoch": 1.5198618307426597,
"grad_norm": 0.24479487538337708,
"learning_rate": 8.784648197279701e-05,
"loss": 0.0327,
"step": 990
},
{
"epoch": 1.535213970447131,
"grad_norm": 0.3580532371997833,
"learning_rate": 8.749384749762015e-05,
"loss": 0.0314,
"step": 1000
},
{
"epoch": 1.535213970447131,
"eval_loss": 0.0391918309032917,
"eval_runtime": 183.4538,
"eval_samples_per_second": 6.318,
"eval_steps_per_second": 6.318,
"step": 1000
},
{
"epoch": 1.5505661101516024,
"grad_norm": 0.3016396760940552,
"learning_rate": 8.713689965111602e-05,
"loss": 0.0248,
"step": 1010
},
{
"epoch": 1.5659182498560735,
"grad_norm": 0.34936264157295227,
"learning_rate": 8.677567949731801e-05,
"loss": 0.0385,
"step": 1020
},
{
"epoch": 1.581270389560545,
"grad_norm": 0.3875962197780609,
"learning_rate": 8.64102285917548e-05,
"loss": 0.0482,
"step": 1030
},
{
"epoch": 1.5966225292650162,
"grad_norm": 0.2111169546842575,
"learning_rate": 8.604058897666962e-05,
"loss": 0.0339,
"step": 1040
},
{
"epoch": 1.6119746689694876,
"grad_norm": 0.24894201755523682,
"learning_rate": 8.566680317618377e-05,
"loss": 0.0229,
"step": 1050
},
{
"epoch": 1.6119746689694876,
"eval_loss": 0.0367230549454689,
"eval_runtime": 183.3229,
"eval_samples_per_second": 6.322,
"eval_steps_per_second": 6.322,
"step": 1050
},
{
"epoch": 1.627326808673959,
"grad_norm": 0.3187296986579895,
"learning_rate": 8.528891419140438e-05,
"loss": 0.0397,
"step": 1060
},
{
"epoch": 1.64267894837843,
"grad_norm": 0.22907699644565582,
"learning_rate": 8.490696549547761e-05,
"loss": 0.0286,
"step": 1070
},
{
"epoch": 1.6580310880829017,
"grad_norm": 0.4290238618850708,
"learning_rate": 8.452100102858734e-05,
"loss": 0.0398,
"step": 1080
},
{
"epoch": 1.6733832277873728,
"grad_norm": 0.3500979542732239,
"learning_rate": 8.413106519290023e-05,
"loss": 0.0325,
"step": 1090
},
{
"epoch": 1.6887353674918442,
"grad_norm": 0.23736059665679932,
"learning_rate": 8.373720284745757e-05,
"loss": 0.0337,
"step": 1100
},
{
"epoch": 1.6887353674918442,
"eval_loss": 0.03723842650651932,
"eval_runtime": 183.1532,
"eval_samples_per_second": 6.328,
"eval_steps_per_second": 6.328,
"step": 1100
},
{
"epoch": 1.7040875071963155,
"grad_norm": 0.2514815032482147,
"learning_rate": 8.333945930301459e-05,
"loss": 0.0312,
"step": 1110
},
{
"epoch": 1.7194396469007867,
"grad_norm": 0.18682609498500824,
"learning_rate": 8.293788031682789e-05,
"loss": 0.0314,
"step": 1120
},
{
"epoch": 1.7347917866052582,
"grad_norm": 0.23153136670589447,
"learning_rate": 8.253251208739137e-05,
"loss": 0.0321,
"step": 1130
},
{
"epoch": 1.7501439263097294,
"grad_norm": 0.4451017677783966,
"learning_rate": 8.21234012491215e-05,
"loss": 0.0313,
"step": 1140
},
{
"epoch": 1.7654960660142007,
"grad_norm": 0.2963927388191223,
"learning_rate": 8.171059486699224e-05,
"loss": 0.028,
"step": 1150
},
{
"epoch": 1.7654960660142007,
"eval_loss": 0.03788375481963158,
"eval_runtime": 182.7863,
"eval_samples_per_second": 6.341,
"eval_steps_per_second": 6.341,
"step": 1150
},
{
"epoch": 1.780848205718672,
"grad_norm": 0.21196348965168,
"learning_rate": 8.129414043112087e-05,
"loss": 0.0343,
"step": 1160
},
{
"epoch": 1.7962003454231432,
"grad_norm": 0.3215029537677765,
"learning_rate": 8.087408585130438e-05,
"loss": 0.0262,
"step": 1170
},
{
"epoch": 1.8115524851276148,
"grad_norm": 0.14892415702342987,
"learning_rate": 8.045047945150796e-05,
"loss": 0.0334,
"step": 1180
},
{
"epoch": 1.826904624832086,
"grad_norm": 0.33556750416755676,
"learning_rate": 8.002336996430561e-05,
"loss": 0.034,
"step": 1190
},
{
"epoch": 1.8422567645365573,
"grad_norm": 0.11458589136600494,
"learning_rate": 7.959280652527394e-05,
"loss": 0.0191,
"step": 1200
},
{
"epoch": 1.8422567645365573,
"eval_loss": 0.03879622742533684,
"eval_runtime": 182.5356,
"eval_samples_per_second": 6.349,
"eval_steps_per_second": 6.349,
"step": 1200
},
{
"epoch": 1.8576089042410286,
"grad_norm": 0.2250894010066986,
"learning_rate": 7.915883866733946e-05,
"loss": 0.036,
"step": 1210
},
{
"epoch": 1.8729610439454998,
"grad_norm": 0.42386719584465027,
"learning_rate": 7.872151631508022e-05,
"loss": 0.031,
"step": 1220
},
{
"epoch": 1.8883131836499714,
"grad_norm": 0.2043813019990921,
"learning_rate": 7.828088977898234e-05,
"loss": 0.033,
"step": 1230
},
{
"epoch": 1.9036653233544425,
"grad_norm": 0.29343181848526,
"learning_rate": 7.783700974965225e-05,
"loss": 0.0449,
"step": 1240
},
{
"epoch": 1.9190174630589139,
"grad_norm": 0.20610815286636353,
"learning_rate": 7.738992729198511e-05,
"loss": 0.0348,
"step": 1250
},
{
"epoch": 1.9190174630589139,
"eval_loss": 0.04106801748275757,
"eval_runtime": 182.8394,
"eval_samples_per_second": 6.339,
"eval_steps_per_second": 6.339,
"step": 1250
},
{
"epoch": 1.9343696027633852,
"grad_norm": 0.16478237509727478,
"learning_rate": 7.693969383929017e-05,
"loss": 0.0232,
"step": 1260
},
{
"epoch": 1.9497217424678563,
"grad_norm": 0.35434988141059875,
"learning_rate": 7.648636118737385e-05,
"loss": 0.0379,
"step": 1270
},
{
"epoch": 1.965073882172328,
"grad_norm": 0.4479084014892578,
"learning_rate": 7.602998148858089e-05,
"loss": 0.0288,
"step": 1280
},
{
"epoch": 1.980426021876799,
"grad_norm": 0.3000826835632324,
"learning_rate": 7.557060724579484e-05,
"loss": 0.0315,
"step": 1290
},
{
"epoch": 1.9957781615812704,
"grad_norm": 0.6401015520095825,
"learning_rate": 7.51082913063978e-05,
"loss": 0.0469,
"step": 1300
},
{
"epoch": 1.9957781615812704,
"eval_loss": 0.03991750255227089,
"eval_runtime": 182.8476,
"eval_samples_per_second": 6.339,
"eval_steps_per_second": 6.339,
"step": 1300
},
{
"epoch": 2.0111303012857418,
"grad_norm": 0.246770977973938,
"learning_rate": 7.464308685619099e-05,
"loss": 0.026,
"step": 1310
},
{
"epoch": 2.026482440990213,
"grad_norm": 0.3887307941913605,
"learning_rate": 7.417504741327587e-05,
"loss": 0.0166,
"step": 1320
},
{
"epoch": 2.0418345806946845,
"grad_norm": 0.4505835771560669,
"learning_rate": 7.370422682189755e-05,
"loss": 0.0212,
"step": 1330
},
{
"epoch": 2.0571867203991556,
"grad_norm": 0.240691676735878,
"learning_rate": 7.323067924625024e-05,
"loss": 0.0163,
"step": 1340
},
{
"epoch": 2.0725388601036268,
"grad_norm": 0.29849642515182495,
"learning_rate": 7.275445916424627e-05,
"loss": 0.0193,
"step": 1350
},
{
"epoch": 2.0725388601036268,
"eval_loss": 0.04122938960790634,
"eval_runtime": 182.6465,
"eval_samples_per_second": 6.346,
"eval_steps_per_second": 6.346,
"step": 1350
},
{
"epoch": 2.0878909998080983,
"grad_norm": 0.45513495802879333,
"learning_rate": 7.227562136124864e-05,
"loss": 0.0151,
"step": 1360
},
{
"epoch": 2.1032431395125695,
"grad_norm": 0.3095690608024597,
"learning_rate": 7.179422092376856e-05,
"loss": 0.0158,
"step": 1370
},
{
"epoch": 2.118595279217041,
"grad_norm": 0.5590988397598267,
"learning_rate": 7.13103132331281e-05,
"loss": 0.0182,
"step": 1380
},
{
"epoch": 2.133947418921512,
"grad_norm": 0.5942097902297974,
"learning_rate": 7.082395395908903e-05,
"loss": 0.0232,
"step": 1390
},
{
"epoch": 2.1492995586259833,
"grad_norm": 0.267322838306427,
"learning_rate": 7.033519905344846e-05,
"loss": 0.0168,
"step": 1400
},
{
"epoch": 2.1492995586259833,
"eval_loss": 0.041591525077819824,
"eval_runtime": 182.3963,
"eval_samples_per_second": 6.354,
"eval_steps_per_second": 6.354,
"step": 1400
},
{
"epoch": 2.164651698330455,
"grad_norm": 0.26787418127059937,
"learning_rate": 6.984410474360195e-05,
"loss": 0.021,
"step": 1410
},
{
"epoch": 2.180003838034926,
"grad_norm": 0.3475477695465088,
"learning_rate": 6.935072752607511e-05,
"loss": 0.0253,
"step": 1420
},
{
"epoch": 2.1953559777393976,
"grad_norm": 0.2828027904033661,
"learning_rate": 6.885512416002412e-05,
"loss": 0.0262,
"step": 1430
},
{
"epoch": 2.2107081174438687,
"grad_norm": 0.24283908307552338,
"learning_rate": 6.835735166070587e-05,
"loss": 0.0189,
"step": 1440
},
{
"epoch": 2.22606025714834,
"grad_norm": 0.21727143228054047,
"learning_rate": 6.785746729291897e-05,
"loss": 0.019,
"step": 1450
},
{
"epoch": 2.22606025714834,
"eval_loss": 0.03900735080242157,
"eval_runtime": 182.0175,
"eval_samples_per_second": 6.368,
"eval_steps_per_second": 6.368,
"step": 1450
},
{
"epoch": 2.2414123968528115,
"grad_norm": 0.34137997031211853,
"learning_rate": 6.735552856441585e-05,
"loss": 0.0256,
"step": 1460
},
{
"epoch": 2.2567645365572826,
"grad_norm": 0.3654659688472748,
"learning_rate": 6.685159321928691e-05,
"loss": 0.0202,
"step": 1470
},
{
"epoch": 2.272116676261754,
"grad_norm": 0.39032450318336487,
"learning_rate": 6.634571923131756e-05,
"loss": 0.0216,
"step": 1480
},
{
"epoch": 2.2874688159662253,
"grad_norm": 0.1562185287475586,
"learning_rate": 6.583796479731872e-05,
"loss": 0.0197,
"step": 1490
},
{
"epoch": 2.3028209556706964,
"grad_norm": 0.2628200054168701,
"learning_rate": 6.532838833043189e-05,
"loss": 0.0268,
"step": 1500
},
{
"epoch": 2.3028209556706964,
"eval_loss": 0.03901956230401993,
"eval_runtime": 181.9429,
"eval_samples_per_second": 6.37,
"eval_steps_per_second": 6.37,
"step": 1500
},
{
"epoch": 2.318173095375168,
"grad_norm": 0.21099306643009186,
"learning_rate": 6.481704845340894e-05,
"loss": 0.0241,
"step": 1510
},
{
"epoch": 2.333525235079639,
"grad_norm": 0.40053361654281616,
"learning_rate": 6.43040039918683e-05,
"loss": 0.0212,
"step": 1520
},
{
"epoch": 2.3488773747841103,
"grad_norm": 0.4750055968761444,
"learning_rate": 6.37893139675273e-05,
"loss": 0.0188,
"step": 1530
},
{
"epoch": 2.364229514488582,
"grad_norm": 0.3320261240005493,
"learning_rate": 6.327303759141235e-05,
"loss": 0.0184,
"step": 1540
},
{
"epoch": 2.379581654193053,
"grad_norm": 0.4511406719684601,
"learning_rate": 6.275523425704707e-05,
"loss": 0.0221,
"step": 1550
},
{
"epoch": 2.379581654193053,
"eval_loss": 0.0411539301276207,
"eval_runtime": 181.9563,
"eval_samples_per_second": 6.37,
"eval_steps_per_second": 6.37,
"step": 1550
},
{
"epoch": 2.3949337938975246,
"grad_norm": 0.2297985851764679,
"learning_rate": 6.223596353361961e-05,
"loss": 0.0225,
"step": 1560
},
{
"epoch": 2.4102859336019957,
"grad_norm": 0.04028713330626488,
"learning_rate": 6.171528515912965e-05,
"loss": 0.0121,
"step": 1570
},
{
"epoch": 2.425638073306467,
"grad_norm": 0.39342209696769714,
"learning_rate": 6.119325903351599e-05,
"loss": 0.0191,
"step": 1580
},
{
"epoch": 2.4409902130109384,
"grad_norm": 0.42913126945495605,
"learning_rate": 6.0669945211765585e-05,
"loss": 0.0256,
"step": 1590
},
{
"epoch": 2.4563423527154096,
"grad_norm": 0.48924121260643005,
"learning_rate": 6.0145403897004696e-05,
"loss": 0.0264,
"step": 1600
},
{
"epoch": 2.4563423527154096,
"eval_loss": 0.0407683365046978,
"eval_runtime": 182.2649,
"eval_samples_per_second": 6.359,
"eval_steps_per_second": 6.359,
"step": 1600
},
{
"epoch": 2.471694492419881,
"grad_norm": 0.49831509590148926,
"learning_rate": 5.961969543357292e-05,
"loss": 0.0215,
"step": 1610
},
{
"epoch": 2.4870466321243523,
"grad_norm": 0.24919560551643372,
"learning_rate": 5.9092880300081123e-05,
"loss": 0.0237,
"step": 1620
},
{
"epoch": 2.5023987718288234,
"grad_norm": 0.5007616281509399,
"learning_rate": 5.8565019102453844e-05,
"loss": 0.0208,
"step": 1630
},
{
"epoch": 2.517750911533295,
"grad_norm": 0.3119317293167114,
"learning_rate": 5.8036172566957006e-05,
"loss": 0.0211,
"step": 1640
},
{
"epoch": 2.533103051237766,
"grad_norm": 0.46274900436401367,
"learning_rate": 5.750640153321194e-05,
"loss": 0.0248,
"step": 1650
},
{
"epoch": 2.533103051237766,
"eval_loss": 0.03897058963775635,
"eval_runtime": 182.3518,
"eval_samples_per_second": 6.356,
"eval_steps_per_second": 6.356,
"step": 1650
},
{
"epoch": 2.5484551909422377,
"grad_norm": 0.46100637316703796,
"learning_rate": 5.697576694719616e-05,
"loss": 0.0204,
"step": 1660
},
{
"epoch": 2.563807330646709,
"grad_norm": 0.18104247748851776,
"learning_rate": 5.644432985423206e-05,
"loss": 0.0229,
"step": 1670
},
{
"epoch": 2.57915947035118,
"grad_norm": 0.3415425419807434,
"learning_rate": 5.591215139196414e-05,
"loss": 0.0227,
"step": 1680
},
{
"epoch": 2.5945116100556516,
"grad_norm": 0.28963977098464966,
"learning_rate": 5.5379292783325585e-05,
"loss": 0.0284,
"step": 1690
},
{
"epoch": 2.6098637497601227,
"grad_norm": 0.48060303926467896,
"learning_rate": 5.4845815329495054e-05,
"loss": 0.018,
"step": 1700
},
{
"epoch": 2.6098637497601227,
"eval_loss": 0.039676472544670105,
"eval_runtime": 182.2656,
"eval_samples_per_second": 6.359,
"eval_steps_per_second": 6.359,
"step": 1700
},
{
"epoch": 2.6252158894645943,
"grad_norm": 0.22176386415958405,
"learning_rate": 5.431178040284446e-05,
"loss": 0.0213,
"step": 1710
},
{
"epoch": 2.6405680291690654,
"grad_norm": 0.6175304055213928,
"learning_rate": 5.377724943987855e-05,
"loss": 0.0162,
"step": 1720
},
{
"epoch": 2.6559201688735365,
"grad_norm": 0.15105417370796204,
"learning_rate": 5.324228393416718e-05,
"loss": 0.0172,
"step": 1730
},
{
"epoch": 2.671272308578008,
"grad_norm": 0.47135475277900696,
"learning_rate": 5.270694542927088e-05,
"loss": 0.0229,
"step": 1740
},
{
"epoch": 2.6866244482824793,
"grad_norm": 0.3154067397117615,
"learning_rate": 5.21712955116608e-05,
"loss": 0.0148,
"step": 1750
},
{
"epoch": 2.6866244482824793,
"eval_loss": 0.04060601443052292,
"eval_runtime": 182.6351,
"eval_samples_per_second": 6.346,
"eval_steps_per_second": 6.346,
"step": 1750
},
{
"epoch": 2.701976587986951,
"grad_norm": 0.35861366987228394,
"learning_rate": 5.1635395803633666e-05,
"loss": 0.02,
"step": 1760
},
{
"epoch": 2.717328727691422,
"grad_norm": 0.20100897550582886,
"learning_rate": 5.109930795622265e-05,
"loss": 0.0159,
"step": 1770
},
{
"epoch": 2.732680867395893,
"grad_norm": 0.12627644836902618,
"learning_rate": 5.056309364210483e-05,
"loss": 0.0172,
"step": 1780
},
{
"epoch": 2.7480330071003647,
"grad_norm": 0.39943668246269226,
"learning_rate": 5.002681454850632e-05,
"loss": 0.0261,
"step": 1790
},
{
"epoch": 2.763385146804836,
"grad_norm": 0.2878319025039673,
"learning_rate": 4.949053237010554e-05,
"loss": 0.0228,
"step": 1800
},
{
"epoch": 2.763385146804836,
"eval_loss": 0.04158102348446846,
"eval_runtime": 184.1784,
"eval_samples_per_second": 6.293,
"eval_steps_per_second": 6.293,
"step": 1800
},
{
"epoch": 2.7787372865093074,
"grad_norm": 0.33815181255340576,
"learning_rate": 4.89543088019359e-05,
"loss": 0.0146,
"step": 1810
},
{
"epoch": 2.7940894262137785,
"grad_norm": 0.25010645389556885,
"learning_rate": 4.841820553228805e-05,
"loss": 0.0284,
"step": 1820
},
{
"epoch": 2.8094415659182497,
"grad_norm": 0.24456074833869934,
"learning_rate": 4.7882284235613324e-05,
"loss": 0.0231,
"step": 1830
},
{
"epoch": 2.8247937056227213,
"grad_norm": 0.4674700200557709,
"learning_rate": 4.734660656542846e-05,
"loss": 0.0205,
"step": 1840
},
{
"epoch": 2.8401458453271924,
"grad_norm": 0.20736347138881683,
"learning_rate": 4.681123414722291e-05,
"loss": 0.0216,
"step": 1850
},
{
"epoch": 2.8401458453271924,
"eval_loss": 0.039232347160577774,
"eval_runtime": 185.7679,
"eval_samples_per_second": 6.239,
"eval_steps_per_second": 6.239,
"step": 1850
},
{
"epoch": 2.855497985031664,
"grad_norm": 0.26255229115486145,
"learning_rate": 4.627622857136929e-05,
"loss": 0.0188,
"step": 1860
},
{
"epoch": 2.870850124736135,
"grad_norm": 0.6380086541175842,
"learning_rate": 4.5741651386037883e-05,
"loss": 0.0257,
"step": 1870
},
{
"epoch": 2.8862022644406062,
"grad_norm": 0.32341963052749634,
"learning_rate": 4.5207564090116064e-05,
"loss": 0.0181,
"step": 1880
},
{
"epoch": 2.901554404145078,
"grad_norm": 0.46333467960357666,
"learning_rate": 4.467402812613323e-05,
"loss": 0.0149,
"step": 1890
},
{
"epoch": 2.916906543849549,
"grad_norm": 0.2776954472064972,
"learning_rate": 4.414110487319244e-05,
"loss": 0.021,
"step": 1900
},
{
"epoch": 2.916906543849549,
"eval_loss": 0.03955698385834694,
"eval_runtime": 187.6459,
"eval_samples_per_second": 6.177,
"eval_steps_per_second": 6.177,
"step": 1900
},
{
"epoch": 2.9322586835540205,
"grad_norm": 0.25150156021118164,
"learning_rate": 4.360885563990919e-05,
"loss": 0.0214,
"step": 1910
},
{
"epoch": 2.9476108232584917,
"grad_norm": 0.3586250841617584,
"learning_rate": 4.307734165735829e-05,
"loss": 0.0137,
"step": 1920
},
{
"epoch": 2.962962962962963,
"grad_norm": 0.36356136202812195,
"learning_rate": 4.254662407202976e-05,
"loss": 0.0217,
"step": 1930
},
{
"epoch": 2.9783151026674344,
"grad_norm": 0.31834840774536133,
"learning_rate": 4.201676393879446e-05,
"loss": 0.019,
"step": 1940
},
{
"epoch": 2.9936672423719055,
"grad_norm": 0.25819826126098633,
"learning_rate": 4.148782221388007e-05,
"loss": 0.016,
"step": 1950
},
{
"epoch": 2.9936672423719055,
"eval_loss": 0.03930049389600754,
"eval_runtime": 188.9697,
"eval_samples_per_second": 6.133,
"eval_steps_per_second": 6.133,
"step": 1950
},
{
"epoch": 3.009019382076377,
"grad_norm": 0.2772454023361206,
"learning_rate": 4.0959859747858706e-05,
"loss": 0.0102,
"step": 1960
},
{
"epoch": 3.0243715217808482,
"grad_norm": 0.3966231346130371,
"learning_rate": 4.043293727864644e-05,
"loss": 0.0077,
"step": 1970
},
{
"epoch": 3.0397236614853194,
"grad_norm": 0.42859339714050293,
"learning_rate": 3.990711542451591e-05,
"loss": 0.012,
"step": 1980
},
{
"epoch": 3.055075801189791,
"grad_norm": 0.2832743525505066,
"learning_rate": 3.9382454677122704e-05,
"loss": 0.0102,
"step": 1990
},
{
"epoch": 3.070427940894262,
"grad_norm": 0.1697220504283905,
"learning_rate": 3.885901539454623e-05,
"loss": 0.0055,
"step": 2000
},
{
"epoch": 3.070427940894262,
"eval_loss": 0.044558048248291016,
"eval_runtime": 190.1014,
"eval_samples_per_second": 6.097,
"eval_steps_per_second": 6.097,
"step": 2000
},
{
"epoch": 3.0857800805987337,
"grad_norm": 0.5966256260871887,
"learning_rate": 3.833685779434597e-05,
"loss": 0.0119,
"step": 2010
},
{
"epoch": 3.101132220303205,
"grad_norm": 0.0359627939760685,
"learning_rate": 3.7816041946634024e-05,
"loss": 0.0081,
"step": 2020
},
{
"epoch": 3.116484360007676,
"grad_norm": 0.2953304946422577,
"learning_rate": 3.729662776716439e-05,
"loss": 0.0091,
"step": 2030
},
{
"epoch": 3.1318364997121475,
"grad_norm": 0.3065471053123474,
"learning_rate": 3.677867501044029e-05,
"loss": 0.0048,
"step": 2040
},
{
"epoch": 3.1471886394166186,
"grad_norm": 0.22076724469661713,
"learning_rate": 3.6262243262839654e-05,
"loss": 0.0128,
"step": 2050
},
{
"epoch": 3.1471886394166186,
"eval_loss": 0.046355538070201874,
"eval_runtime": 190.2797,
"eval_samples_per_second": 6.091,
"eval_steps_per_second": 6.091,
"step": 2050
},
{
"epoch": 3.16254077912109,
"grad_norm": 0.04143872112035751,
"learning_rate": 3.574739193576042e-05,
"loss": 0.0075,
"step": 2060
},
{
"epoch": 3.1778929188255614,
"grad_norm": 0.21622024476528168,
"learning_rate": 3.5234180258785554e-05,
"loss": 0.0072,
"step": 2070
},
{
"epoch": 3.1932450585300325,
"grad_norm": 0.10329017788171768,
"learning_rate": 3.472266727286928e-05,
"loss": 0.0082,
"step": 2080
},
{
"epoch": 3.208597198234504,
"grad_norm": 0.15324845910072327,
"learning_rate": 3.4212911823544746e-05,
"loss": 0.0077,
"step": 2090
},
{
"epoch": 3.223949337938975,
"grad_norm": 0.2115231454372406,
"learning_rate": 3.370497255415443e-05,
"loss": 0.0105,
"step": 2100
},
{
"epoch": 3.223949337938975,
"eval_loss": 0.046613357961177826,
"eval_runtime": 191.1119,
"eval_samples_per_second": 6.065,
"eval_steps_per_second": 6.065,
"step": 2100
},
{
"epoch": 3.239301477643447,
"grad_norm": 0.6683870553970337,
"learning_rate": 3.319890789910364e-05,
"loss": 0.0125,
"step": 2110
},
{
"epoch": 3.254653617347918,
"grad_norm": 0.24559286236763,
"learning_rate": 3.269477607713802e-05,
"loss": 0.0114,
"step": 2120
},
{
"epoch": 3.270005757052389,
"grad_norm": 0.5085189938545227,
"learning_rate": 3.219263508464614e-05,
"loss": 0.0104,
"step": 2130
},
{
"epoch": 3.2853578967568606,
"grad_norm": 0.7053877711296082,
"learning_rate": 3.169254268898725e-05,
"loss": 0.0081,
"step": 2140
},
{
"epoch": 3.3007100364613318,
"grad_norm": 0.04670366272330284,
"learning_rate": 3.119455642184572e-05,
"loss": 0.009,
"step": 2150
},
{
"epoch": 3.3007100364613318,
"eval_loss": 0.045028459280729294,
"eval_runtime": 192.2209,
"eval_samples_per_second": 6.03,
"eval_steps_per_second": 6.03,
"step": 2150
},
{
"epoch": 3.3160621761658033,
"grad_norm": 0.1904493123292923,
"learning_rate": 3.069873357261249e-05,
"loss": 0.0089,
"step": 2160
},
{
"epoch": 3.3314143158702745,
"grad_norm": 0.1548696756362915,
"learning_rate": 3.020513118179428e-05,
"loss": 0.0126,
"step": 2170
},
{
"epoch": 3.3467664555747456,
"grad_norm": 0.22131961584091187,
"learning_rate": 2.9713806034451652e-05,
"loss": 0.0086,
"step": 2180
},
{
"epoch": 3.362118595279217,
"grad_norm": 0.031989939510822296,
"learning_rate": 2.9224814653666242e-05,
"loss": 0.0093,
"step": 2190
},
{
"epoch": 3.3774707349836883,
"grad_norm": 0.41190671920776367,
"learning_rate": 2.8738213294038212e-05,
"loss": 0.0087,
"step": 2200
},
{
"epoch": 3.3774707349836883,
"eval_loss": 0.04871184378862381,
"eval_runtime": 193.1751,
"eval_samples_per_second": 6.0,
"eval_steps_per_second": 6.0,
"step": 2200
},
{
"epoch": 3.39282287468816,
"grad_norm": 0.6765581369400024,
"learning_rate": 2.8254057935214735e-05,
"loss": 0.0092,
"step": 2210
},
{
"epoch": 3.408175014392631,
"grad_norm": 0.07496938109397888,
"learning_rate": 2.7772404275449825e-05,
"loss": 0.0099,
"step": 2220
},
{
"epoch": 3.423527154097102,
"grad_norm": 0.20954781770706177,
"learning_rate": 2.7293307725196793e-05,
"loss": 0.0082,
"step": 2230
},
{
"epoch": 3.4388792938015738,
"grad_norm": 0.09017032384872437,
"learning_rate": 2.6816823400733625e-05,
"loss": 0.0075,
"step": 2240
},
{
"epoch": 3.454231433506045,
"grad_norm": 0.46747562289237976,
"learning_rate": 2.6343006117822437e-05,
"loss": 0.0102,
"step": 2250
},
{
"epoch": 3.454231433506045,
"eval_loss": 0.04728643596172333,
"eval_runtime": 194.1858,
"eval_samples_per_second": 5.969,
"eval_steps_per_second": 5.969,
"step": 2250
},
{
"epoch": 3.469583573210516,
"grad_norm": 0.43796277046203613,
"learning_rate": 2.587191038540317e-05,
"loss": 0.0091,
"step": 2260
},
{
"epoch": 3.4849357129149876,
"grad_norm": 0.06554798781871796,
"learning_rate": 2.5403590399322886e-05,
"loss": 0.003,
"step": 2270
},
{
"epoch": 3.5002878526194587,
"grad_norm": 0.26366013288497925,
"learning_rate": 2.493810003610092e-05,
"loss": 0.0088,
"step": 2280
},
{
"epoch": 3.51563999232393,
"grad_norm": 0.1698417365550995,
"learning_rate": 2.4475492846730835e-05,
"loss": 0.012,
"step": 2290
},
{
"epoch": 3.5309921320284015,
"grad_norm": 0.22482936084270477,
"learning_rate": 2.4015822050519794e-05,
"loss": 0.007,
"step": 2300
},
{
"epoch": 3.5309921320284015,
"eval_loss": 0.04855369031429291,
"eval_runtime": 193.8544,
"eval_samples_per_second": 5.979,
"eval_steps_per_second": 5.979,
"step": 2300
},
{
"epoch": 3.546344271732873,
"grad_norm": 0.5466733574867249,
"learning_rate": 2.3559140528966074e-05,
"loss": 0.0051,
"step": 2310
},
{
"epoch": 3.561696411437344,
"grad_norm": 0.18867827951908112,
"learning_rate": 2.3105500819675495e-05,
"loss": 0.0087,
"step": 2320
},
{
"epoch": 3.5770485511418153,
"grad_norm": 0.27712732553482056,
"learning_rate": 2.265495511031742e-05,
"loss": 0.0113,
"step": 2330
},
{
"epoch": 3.5924006908462864,
"grad_norm": 0.22945642471313477,
"learning_rate": 2.2207555232620893e-05,
"loss": 0.0101,
"step": 2340
},
{
"epoch": 3.607752830550758,
"grad_norm": 0.10571388900279999,
"learning_rate": 2.1763352656411785e-05,
"loss": 0.0113,
"step": 2350
},
{
"epoch": 3.607752830550758,
"eval_loss": 0.048977114260196686,
"eval_runtime": 194.5248,
"eval_samples_per_second": 5.958,
"eval_steps_per_second": 5.958,
"step": 2350
},
{
"epoch": 3.6231049702552296,
"grad_norm": 0.23856189846992493,
"learning_rate": 2.1322398483691787e-05,
"loss": 0.0045,
"step": 2360
},
{
"epoch": 3.6384571099597007,
"grad_norm": 0.40275660157203674,
"learning_rate": 2.08847434427593e-05,
"loss": 0.0083,
"step": 2370
},
{
"epoch": 3.653809249664172,
"grad_norm": 0.07861816138029099,
"learning_rate": 2.0450437882373697e-05,
"loss": 0.0065,
"step": 2380
},
{
"epoch": 3.669161389368643,
"grad_norm": 0.29640939831733704,
"learning_rate": 2.0019531765962995e-05,
"loss": 0.0086,
"step": 2390
},
{
"epoch": 3.6845135290731146,
"grad_norm": 0.329661101102829,
"learning_rate": 1.9592074665876026e-05,
"loss": 0.0066,
"step": 2400
},
{
"epoch": 3.6845135290731146,
"eval_loss": 0.05218956619501114,
"eval_runtime": 195.5221,
"eval_samples_per_second": 5.928,
"eval_steps_per_second": 5.928,
"step": 2400
},
{
"epoch": 3.6998656687775857,
"grad_norm": 0.06471792608499527,
"learning_rate": 1.9168115757679535e-05,
"loss": 0.0091,
"step": 2410
},
{
"epoch": 3.7152178084820573,
"grad_norm": 0.1296943873167038,
"learning_rate": 1.8747703814500866e-05,
"loss": 0.0065,
"step": 2420
},
{
"epoch": 3.7305699481865284,
"grad_norm": 0.4820161461830139,
"learning_rate": 1.833088720141698e-05,
"loss": 0.0058,
"step": 2430
},
{
"epoch": 3.7459220878909996,
"grad_norm": 0.27282992005348206,
"learning_rate": 1.7917713869890557e-05,
"loss": 0.0085,
"step": 2440
},
{
"epoch": 3.761274227595471,
"grad_norm": 0.1417207419872284,
"learning_rate": 1.7508231352253435e-05,
"loss": 0.0064,
"step": 2450
},
{
"epoch": 3.761274227595471,
"eval_loss": 0.05100034922361374,
"eval_runtime": 196.0337,
"eval_samples_per_second": 5.912,
"eval_steps_per_second": 5.912,
"step": 2450
},
{
"epoch": 3.7766263672999423,
"grad_norm": 0.03955746442079544,
"learning_rate": 1.7102486756238435e-05,
"loss": 0.007,
"step": 2460
},
{
"epoch": 3.791978507004414,
"grad_norm": 0.3051344156265259,
"learning_rate": 1.6700526759560002e-05,
"loss": 0.0079,
"step": 2470
},
{
"epoch": 3.807330646708885,
"grad_norm": 0.5592838525772095,
"learning_rate": 1.6302397604544257e-05,
"loss": 0.0106,
"step": 2480
},
{
"epoch": 3.822682786413356,
"grad_norm": 0.054226912558078766,
"learning_rate": 1.5908145092809272e-05,
"loss": 0.0058,
"step": 2490
},
{
"epoch": 3.8380349261178277,
"grad_norm": 0.15750406682491302,
"learning_rate": 1.551781457999586e-05,
"loss": 0.0095,
"step": 2500
},
{
"epoch": 3.8380349261178277,
"eval_loss": 0.05139421671628952,
"eval_runtime": 196.2586,
"eval_samples_per_second": 5.905,
"eval_steps_per_second": 5.905,
"step": 2500
},
{
"epoch": 3.853387065822299,
"grad_norm": 0.28091803193092346,
"learning_rate": 1.513145097054977e-05,
"loss": 0.0076,
"step": 2510
},
{
"epoch": 3.8687392055267704,
"grad_norm": 0.11250684410333633,
"learning_rate": 1.4749098712555854e-05,
"loss": 0.0065,
"step": 2520
},
{
"epoch": 3.8840913452312416,
"grad_norm": 0.06436865776777267,
"learning_rate": 1.4370801792624656e-05,
"loss": 0.0043,
"step": 2530
},
{
"epoch": 3.8994434849357127,
"grad_norm": 0.45863327383995056,
"learning_rate": 1.399660373083203e-05,
"loss": 0.0087,
"step": 2540
},
{
"epoch": 3.9147956246401843,
"grad_norm": 0.11186732351779938,
"learning_rate": 1.3626547575712545e-05,
"loss": 0.0089,
"step": 2550
},
{
"epoch": 3.9147956246401843,
"eval_loss": 0.052093155682086945,
"eval_runtime": 195.7248,
"eval_samples_per_second": 5.922,
"eval_steps_per_second": 5.922,
"step": 2550
},
{
"epoch": 3.9301477643446554,
"grad_norm": 0.45800843834877014,
"learning_rate": 1.3260675899307096e-05,
"loss": 0.0124,
"step": 2560
},
{
"epoch": 3.945499904049127,
"grad_norm": 0.0732005164027214,
"learning_rate": 1.2899030792265349e-05,
"loss": 0.0068,
"step": 2570
},
{
"epoch": 3.960852043753598,
"grad_norm": 0.22500959038734436,
"learning_rate": 1.2541653859003437e-05,
"loss": 0.0096,
"step": 2580
},
{
"epoch": 3.9762041834580693,
"grad_norm": 0.3719674050807953,
"learning_rate": 1.2188586212917846e-05,
"loss": 0.0111,
"step": 2590
},
{
"epoch": 3.991556323162541,
"grad_norm": 0.27437856793403625,
"learning_rate": 1.1839868471655523e-05,
"loss": 0.0065,
"step": 2600
},
{
"epoch": 3.991556323162541,
"eval_loss": 0.05236229673027992,
"eval_runtime": 196.4836,
"eval_samples_per_second": 5.899,
"eval_steps_per_second": 5.899,
"step": 2600
},
{
"epoch": 4.006908462867012,
"grad_norm": 0.07442650198936462,
"learning_rate": 1.1495540752441235e-05,
"loss": 0.0104,
"step": 2610
},
{
"epoch": 4.0222606025714835,
"grad_norm": 0.1319025605916977,
"learning_rate": 1.1155642667462318e-05,
"loss": 0.0031,
"step": 2620
},
{
"epoch": 4.037612742275955,
"grad_norm": 0.05932845175266266,
"learning_rate": 1.082021331931164e-05,
"loss": 0.0052,
"step": 2630
},
{
"epoch": 4.052964881980426,
"grad_norm": 0.18610794842243195,
"learning_rate": 1.0489291296489152e-05,
"loss": 0.0037,
"step": 2640
},
{
"epoch": 4.068317021684897,
"grad_norm": 0.07096968591213226,
"learning_rate": 1.0162914668962631e-05,
"loss": 0.0034,
"step": 2650
},
{
"epoch": 4.068317021684897,
"eval_loss": 0.053961481899023056,
"eval_runtime": 197.3498,
"eval_samples_per_second": 5.873,
"eval_steps_per_second": 5.873,
"step": 2650
},
{
"epoch": 4.083669161389369,
"grad_norm": 0.20125052332878113,
"learning_rate": 9.841120983787915e-06,
"loss": 0.0036,
"step": 2660
},
{
"epoch": 4.09902130109384,
"grad_norm": 0.25469255447387695,
"learning_rate": 9.523947260789546e-06,
"loss": 0.0024,
"step": 2670
},
{
"epoch": 4.114373440798311,
"grad_norm": 0.4579504430294037,
"learning_rate": 9.211429988301823e-06,
"loss": 0.0029,
"step": 2680
},
{
"epoch": 4.129725580502782,
"grad_norm": 0.5402017831802368,
"learning_rate": 8.90360511897121e-06,
"loss": 0.0049,
"step": 2690
},
{
"epoch": 4.1450777202072535,
"grad_norm": 0.6339585185050964,
"learning_rate": 8.600508065620161e-06,
"loss": 0.0032,
"step": 2700
},
{
"epoch": 4.1450777202072535,
"eval_loss": 0.05632999911904335,
"eval_runtime": 197.3915,
"eval_samples_per_second": 5.872,
"eval_steps_per_second": 5.872,
"step": 2700
},
{
"epoch": 4.1604298599117255,
"grad_norm": 0.1994244009256363,
"learning_rate": 8.302173697173226e-06,
"loss": 0.0032,
"step": 2710
},
{
"epoch": 4.175781999616197,
"grad_norm": 0.06390415132045746,
"learning_rate": 8.008636334645631e-06,
"loss": 0.0038,
"step": 2720
},
{
"epoch": 4.191134139320668,
"grad_norm": 0.1828015148639679,
"learning_rate": 7.71992974719491e-06,
"loss": 0.0035,
"step": 2730
},
{
"epoch": 4.206486279025139,
"grad_norm": 0.06357960402965546,
"learning_rate": 7.436087148236054e-06,
"loss": 0.0018,
"step": 2740
},
{
"epoch": 4.22183841872961,
"grad_norm": 0.11706133186817169,
"learning_rate": 7.157141191620548e-06,
"loss": 0.0026,
"step": 2750
},
{
"epoch": 4.22183841872961,
"eval_loss": 0.05644020810723305,
"eval_runtime": 196.6488,
"eval_samples_per_second": 5.894,
"eval_steps_per_second": 5.894,
"step": 2750
},
{
"epoch": 4.237190558434082,
"grad_norm": 0.08909904956817627,
"learning_rate": 6.883123967879796e-06,
"loss": 0.002,
"step": 2760
},
{
"epoch": 4.252542698138553,
"grad_norm": 0.08192160725593567,
"learning_rate": 6.6140670005334136e-06,
"loss": 0.0011,
"step": 2770
},
{
"epoch": 4.267894837843024,
"grad_norm": 0.1498498171567917,
"learning_rate": 6.350001242462617e-06,
"loss": 0.0024,
"step": 2780
},
{
"epoch": 4.2832469775474955,
"grad_norm": 0.18221737444400787,
"learning_rate": 6.090957072349385e-06,
"loss": 0.0033,
"step": 2790
},
{
"epoch": 4.298599117251967,
"grad_norm": 0.03468354791402817,
"learning_rate": 5.836964291181624e-06,
"loss": 0.0024,
"step": 2800
},
{
"epoch": 4.298599117251967,
"eval_loss": 0.058551765978336334,
"eval_runtime": 197.1537,
"eval_samples_per_second": 5.879,
"eval_steps_per_second": 5.879,
"step": 2800
},
{
"epoch": 4.313951256956439,
"grad_norm": 0.17632409930229187,
"learning_rate": 5.588052118824804e-06,
"loss": 0.0019,
"step": 2810
},
{
"epoch": 4.32930339666091,
"grad_norm": 0.10651715844869614,
"learning_rate": 5.344249190660428e-06,
"loss": 0.0014,
"step": 2820
},
{
"epoch": 4.344655536365381,
"grad_norm": 0.18857327103614807,
"learning_rate": 5.105583554291765e-06,
"loss": 0.0023,
"step": 2830
},
{
"epoch": 4.360007676069852,
"grad_norm": 0.08540496975183487,
"learning_rate": 4.872082666317207e-06,
"loss": 0.0012,
"step": 2840
},
{
"epoch": 4.375359815774323,
"grad_norm": 0.1320229172706604,
"learning_rate": 4.6437733891715905e-06,
"loss": 0.0021,
"step": 2850
},
{
"epoch": 4.375359815774323,
"eval_loss": 0.059518758207559586,
"eval_runtime": 198.2964,
"eval_samples_per_second": 5.845,
"eval_steps_per_second": 5.845,
"step": 2850
},
{
"epoch": 4.390711955478795,
"grad_norm": 0.0625540241599083,
"learning_rate": 4.420681988035891e-06,
"loss": 0.0016,
"step": 2860
},
{
"epoch": 4.406064095183266,
"grad_norm": 0.12032885104417801,
"learning_rate": 4.2028341278156026e-06,
"loss": 0.0044,
"step": 2870
},
{
"epoch": 4.4214162348877375,
"grad_norm": 0.05868254974484444,
"learning_rate": 3.990254870188221e-06,
"loss": 0.0033,
"step": 2880
},
{
"epoch": 4.436768374592209,
"grad_norm": 0.03261280059814453,
"learning_rate": 3.7829686707200827e-06,
"loss": 0.0035,
"step": 2890
},
{
"epoch": 4.45212051429668,
"grad_norm": 0.2462652176618576,
"learning_rate": 3.580999376052946e-06,
"loss": 0.0043,
"step": 2900
},
{
"epoch": 4.45212051429668,
"eval_loss": 0.06037411838769913,
"eval_runtime": 198.4021,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 5.842,
"step": 2900
},
{
"epoch": 4.467472654001152,
"grad_norm": 0.07432649284601212,
"learning_rate": 3.3843702211606153e-06,
"loss": 0.0024,
"step": 2910
},
{
"epoch": 4.482824793705623,
"grad_norm": 0.03148715943098068,
"learning_rate": 3.193103826675947e-06,
"loss": 0.0055,
"step": 2920
},
{
"epoch": 4.498176933410094,
"grad_norm": 0.13697880506515503,
"learning_rate": 3.007222196288545e-06,
"loss": 0.0018,
"step": 2930
},
{
"epoch": 4.513529073114565,
"grad_norm": 0.01840958185493946,
"learning_rate": 2.8267467142133687e-06,
"loss": 0.0014,
"step": 2940
},
{
"epoch": 4.528881212819036,
"grad_norm": 0.34372827410697937,
"learning_rate": 2.651698142730674e-06,
"loss": 0.0019,
"step": 2950
},
{
"epoch": 4.528881212819036,
"eval_loss": 0.06066432595252991,
"eval_runtime": 198.4819,
"eval_samples_per_second": 5.839,
"eval_steps_per_second": 5.839,
"step": 2950
},
{
"epoch": 4.544233352523508,
"grad_norm": 0.01365745160728693,
"learning_rate": 2.4820966197974748e-06,
"loss": 0.0038,
"step": 2960
},
{
"epoch": 4.5595854922279795,
"grad_norm": 0.2683570683002472,
"learning_rate": 2.3179616567308216e-06,
"loss": 0.0017,
"step": 2970
},
{
"epoch": 4.574937631932451,
"grad_norm": 0.010202012956142426,
"learning_rate": 2.1593121359631873e-06,
"loss": 0.0023,
"step": 2980
},
{
"epoch": 4.590289771636922,
"grad_norm": 0.015847062692046165,
"learning_rate": 2.006166308870189e-06,
"loss": 0.0017,
"step": 2990
},
{
"epoch": 4.605641911341393,
"grad_norm": 0.010038632899522781,
"learning_rate": 1.8585417936709038e-06,
"loss": 0.0011,
"step": 3000
},
{
"epoch": 4.605641911341393,
"eval_loss": 0.06103256344795227,
"eval_runtime": 198.7127,
"eval_samples_per_second": 5.833,
"eval_steps_per_second": 5.833,
"step": 3000
},
{
"epoch": 4.620994051045864,
"grad_norm": 0.05422445386648178,
"learning_rate": 1.7164555734010545e-06,
"loss": 0.0035,
"step": 3010
},
{
"epoch": 4.636346190750336,
"grad_norm": 0.05177353695034981,
"learning_rate": 1.5799239939592204e-06,
"loss": 0.0032,
"step": 3020
},
{
"epoch": 4.651698330454807,
"grad_norm": 0.07858134806156158,
"learning_rate": 1.4489627622263747e-06,
"loss": 0.0018,
"step": 3030
},
{
"epoch": 4.667050470159278,
"grad_norm": 0.4079644978046417,
"learning_rate": 1.3235869442589255e-06,
"loss": 0.0079,
"step": 3040
},
{
"epoch": 4.6824026098637495,
"grad_norm": 0.2908116281032562,
"learning_rate": 1.2038109635555406e-06,
"loss": 0.0018,
"step": 3050
},
{
"epoch": 4.6824026098637495,
"eval_loss": 0.061669450253248215,
"eval_runtime": 199.4108,
"eval_samples_per_second": 5.812,
"eval_steps_per_second": 5.812,
"step": 3050
},
{
"epoch": 4.697754749568221,
"grad_norm": 0.009541109204292297,
"learning_rate": 1.0896485993977467e-06,
"loss": 0.0043,
"step": 3060
},
{
"epoch": 4.713106889272693,
"grad_norm": 0.07447125762701035,
"learning_rate": 9.811129852647982e-07,
"loss": 0.0024,
"step": 3070
},
{
"epoch": 4.728459028977164,
"grad_norm": 0.06783637404441833,
"learning_rate": 8.782166073227515e-07,
"loss": 0.0043,
"step": 3080
},
{
"epoch": 4.743811168681635,
"grad_norm": 0.04826446250081062,
"learning_rate": 7.809713029880428e-07,
"loss": 0.0023,
"step": 3090
},
{
"epoch": 4.759163308386106,
"grad_norm": 0.0676339641213417,
"learning_rate": 6.893882595656598e-07,
"loss": 0.0051,
"step": 3100
},
{
"epoch": 4.759163308386106,
"eval_loss": 0.061440397053956985,
"eval_runtime": 199.4147,
"eval_samples_per_second": 5.812,
"eval_steps_per_second": 5.812,
"step": 3100
},
{
"epoch": 4.774515448090577,
"grad_norm": 0.28009065985679626,
"learning_rate": 6.034780129621664e-07,
"loss": 0.005,
"step": 3110
},
{
"epoch": 4.789867587795049,
"grad_norm": 0.33211374282836914,
"learning_rate": 5.232504464735833e-07,
"loss": 0.0018,
"step": 3120
},
{
"epoch": 4.80521972749952,
"grad_norm": 0.3357774615287781,
"learning_rate": 4.487147896484523e-07,
"loss": 0.0027,
"step": 3130
},
{
"epoch": 4.8205718672039914,
"grad_norm": 0.009095356799662113,
"learning_rate": 3.7987961722599773e-07,
"loss": 0.0063,
"step": 3140
},
{
"epoch": 4.835924006908463,
"grad_norm": 0.04379582032561302,
"learning_rate": 3.167528481496984e-07,
"loss": 0.0032,
"step": 3150
},
{
"epoch": 4.835924006908463,
"eval_loss": 0.061653103679418564,
"eval_runtime": 199.5471,
"eval_samples_per_second": 5.808,
"eval_steps_per_second": 5.808,
"step": 3150
},
{
"epoch": 4.851276146612934,
"grad_norm": 0.09165111929178238,
"learning_rate": 2.593417446562607e-07,
"loss": 0.0056,
"step": 3160
},
{
"epoch": 4.866628286317406,
"grad_norm": 0.06176696717739105,
"learning_rate": 2.0765291144016486e-07,
"loss": 0.0011,
"step": 3170
},
{
"epoch": 4.881980426021877,
"grad_norm": 0.1245846152305603,
"learning_rate": 1.6169229489385595e-07,
"loss": 0.0031,
"step": 3180
},
{
"epoch": 4.897332565726348,
"grad_norm": 0.0306039210408926,
"learning_rate": 1.2146518242363014e-07,
"loss": 0.0019,
"step": 3190
},
{
"epoch": 4.912684705430819,
"grad_norm": 0.10327928513288498,
"learning_rate": 8.697620184138222e-08,
"loss": 0.001,
"step": 3200
},
{
"epoch": 4.912684705430819,
"eval_loss": 0.06173569709062576,
"eval_runtime": 198.5556,
"eval_samples_per_second": 5.837,
"eval_steps_per_second": 5.837,
"step": 3200
},
{
"epoch": 4.92803684513529,
"grad_norm": 0.056341785937547684,
"learning_rate": 5.822932083221488e-08,
"loss": 0.0029,
"step": 3210
},
{
"epoch": 4.943388984839762,
"grad_norm": 0.10355229675769806,
"learning_rate": 3.5227846497970504e-08,
"loss": 0.002,
"step": 3220
},
{
"epoch": 4.958741124544233,
"grad_norm": 0.04796501621603966,
"learning_rate": 1.7974424976796577e-08,
"loss": 0.0016,
"step": 3230
},
{
"epoch": 4.974093264248705,
"grad_norm": 0.08749626576900482,
"learning_rate": 6.47104113870034e-09,
"loss": 0.0035,
"step": 3240
},
{
"epoch": 4.989445403953176,
"grad_norm": 0.2543277442455292,
"learning_rate": 7.190183572314269e-10,
"loss": 0.0029,
"step": 3250
},
{
"epoch": 4.989445403953176,
"eval_loss": 0.06179765984416008,
"eval_runtime": 199.9691,
"eval_samples_per_second": 5.796,
"eval_steps_per_second": 5.796,
"step": 3250
},
{
"epoch": 4.997121473805412,
"step": 3255,
"total_flos": 8.175027015212728e+17,
"train_loss": 0.04094330755830635,
"train_runtime": 42059.4312,
"train_samples_per_second": 1.239,
"train_steps_per_second": 0.077
}
],
"logging_steps": 10,
"max_steps": 3255,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.175027015212728e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}