{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6839945280437757,
  "eval_steps": 50,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0,
      "loss": 1.7769,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.800000000000001e-06,
      "loss": 1.4834,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.02e-05,
      "loss": 1.6171,
      "step": 20
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.62e-05,
      "loss": 1.5372,
      "step": 30
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.22e-05,
      "loss": 1.618,
      "step": 40
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.8199999999999998e-05,
      "loss": 1.3178,
      "step": 50
    },
    {
      "epoch": 0.03,
      "eval_loss": 1.0766865015029907,
      "eval_runtime": 137.8647,
      "eval_samples_per_second": 4.715,
      "eval_steps_per_second": 1.182,
      "step": 50
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.999704741743589e-05,
      "loss": 1.1333,
      "step": 60
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.9979008066540737e-05,
      "loss": 0.9023,
      "step": 70
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.9944589390244404e-05,
      "loss": 0.9608,
      "step": 80
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.9893829024864087e-05,
      "loss": 0.8594,
      "step": 90
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.9826782476114073e-05,
      "loss": 0.7765,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 0.7130317091941833,
      "eval_runtime": 137.4102,
      "eval_samples_per_second": 4.73,
      "eval_steps_per_second": 1.186,
      "step": 100
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.9743523058411057e-05,
      "loss": 0.8438,
      "step": 110
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.9644141814705893e-05,
      "loss": 0.7234,
      "step": 120
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.9528747416929467e-05,
      "loss": 0.6949,
      "step": 130
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.939746604716155e-05,
      "loss": 0.7523,
      "step": 140
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.925044125965253e-05,
      "loss": 0.6491,
      "step": 150
    },
    {
      "epoch": 0.1,
      "eval_loss": 0.6840489506721497,
      "eval_runtime": 137.3722,
      "eval_samples_per_second": 4.732,
      "eval_steps_per_second": 1.187,
      "step": 150
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.9087833823848947e-05,
      "loss": 0.677,
      "step": 160
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.890982154859448e-05,
      "loss": 0.7101,
      "step": 170
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.8716599087698565e-05,
      "loss": 0.7427,
      "step": 180
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.8508377727085337e-05,
      "loss": 0.7108,
      "step": 190
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.8285385153755532e-05,
      "loss": 0.6441,
      "step": 200
    },
    {
      "epoch": 0.14,
      "eval_loss": 0.6828967928886414,
      "eval_runtime": 137.6745,
      "eval_samples_per_second": 4.721,
      "eval_steps_per_second": 1.184,
      "step": 200
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.8047865206814164e-05,
      "loss": 0.6786,
      "step": 210
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.779607761083596e-05,
      "loss": 0.6927,
      "step": 220
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.7530297691860436e-05,
      "loss": 0.688,
      "step": 230
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.7250816076326834e-05,
      "loss": 0.6965,
      "step": 240
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.695793837327844e-05,
      "loss": 0.701,
      "step": 250
    },
    {
      "epoch": 0.17,
      "eval_loss": 0.6642373204231262,
      "eval_runtime": 137.4893,
      "eval_samples_per_second": 4.728,
      "eval_steps_per_second": 1.186,
      "step": 250
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.6651984840183545e-05,
      "loss": 0.7585,
      "step": 260
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.6333290032738626e-05,
      "loss": 0.6551,
      "step": 270
    },
    {
      "epoch": 0.19,
      "learning_rate": 2.60022024390366e-05,
      "loss": 0.656,
      "step": 280
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.565908409850019e-05,
      "loss": 0.6265,
      "step": 290
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.5304310205997168e-05,
      "loss": 0.6936,
      "step": 300
    },
    {
      "epoch": 0.21,
      "eval_loss": 0.6427347660064697,
      "eval_runtime": 137.4455,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 1.186,
      "step": 300
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.4938268701570245e-05,
      "loss": 0.6359,
      "step": 310
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.4561359846230346e-05,
      "loss": 0.654,
      "step": 320
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.4173995784277065e-05,
      "loss": 0.6803,
      "step": 330
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.3776600092624925e-05,
      "loss": 0.6788,
      "step": 340
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.3369607317628244e-05,
      "loss": 0.6538,
      "step": 350
    },
    {
      "epoch": 0.24,
      "eval_loss": 0.6175208687782288,
      "eval_runtime": 137.5627,
      "eval_samples_per_second": 4.725,
      "eval_steps_per_second": 1.185,
      "step": 350
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.2953462499911072e-05,
      "loss": 0.5896,
      "step": 360
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.2528620687721802e-05,
      "loss": 0.6616,
      "step": 370
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.2095546439344614e-05,
      "loss": 0.6206,
      "step": 380
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.165471331511176e-05,
      "loss": 0.6473,
      "step": 390
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.1206603359572346e-05,
      "loss": 0.5927,
      "step": 400
    },
    {
      "epoch": 0.27,
      "eval_loss": 0.6138765215873718,
      "eval_runtime": 137.4362,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 1.186,
      "step": 400
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.0751706574383676e-05,
      "loss": 0.658,
      "step": 410
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.029052038250162e-05,
      "loss": 0.6825,
      "step": 420
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.982354908425593e-05,
      "loss": 0.6424,
      "step": 430
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.935130330590525e-05,
      "loss": 0.6188,
      "step": 440
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.887429944127475e-05,
      "loss": 0.6709,
      "step": 450
    },
    {
      "epoch": 0.31,
      "eval_loss": 0.6129215955734253,
      "eval_runtime": 137.6096,
      "eval_samples_per_second": 4.724,
      "eval_steps_per_second": 1.185,
      "step": 450
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.8393059087087106e-05,
      "loss": 0.649,
      "step": 460
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.7908108472604124e-05,
      "loss": 0.5975,
      "step": 470
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.7419977884202765e-05,
      "loss": 0.6119,
      "step": 480
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.6929201085514793e-05,
      "loss": 0.5895,
      "step": 490
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.643631473376405e-05,
      "loss": 0.5961,
      "step": 500
    },
    {
      "epoch": 0.34,
      "eval_loss": 0.6077620983123779,
      "eval_runtime": 137.6715,
      "eval_samples_per_second": 4.721,
      "eval_steps_per_second": 1.184,
      "step": 500
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.5941857792939702e-05,
      "loss": 0.6895,
      "step": 510
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.5446370944446987e-05,
      "loss": 0.6227,
      "step": 520
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.4950395995880073e-05,
      "loss": 0.6821,
      "step": 530
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.4454475288563387e-05,
      "loss": 0.6606,
      "step": 540
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.395915110450934e-05,
      "loss": 0.6161,
      "step": 550
    },
    {
      "epoch": 0.38,
      "eval_loss": 0.5955923199653625,
      "eval_runtime": 137.6678,
      "eval_samples_per_second": 4.722,
      "eval_steps_per_second": 1.184,
      "step": 550
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.3464965073440924e-05,
      "loss": 0.6497,
      "step": 560
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.2972457580527551e-05,
      "loss": 0.6619,
      "step": 570
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.2482167175481786e-05,
      "loss": 0.6551,
      "step": 580
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.1994629983663183e-05,
      "loss": 0.6313,
      "step": 590
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.1510379119833048e-05,
      "loss": 0.5999,
      "step": 600
    },
    {
      "epoch": 0.41,
      "eval_loss": 0.5938005447387695,
      "eval_runtime": 137.3859,
      "eval_samples_per_second": 4.731,
      "eval_steps_per_second": 1.186,
      "step": 600
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.1029944105201278e-05,
      "loss": 0.5862,
      "step": 610
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.0553850288402696e-05,
      "loss": 0.572,
      "step": 620
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.0082618271036033e-05,
      "loss": 0.6192,
      "step": 630
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.616763338393728e-06,
      "loss": 0.6318,
      "step": 640
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.156794896005e-06,
      "loss": 0.6248,
      "step": 650
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.5824475884437561,
      "eval_runtime": 137.4611,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 1.186,
      "step": 650
    },
    {
      "epoch": 0.45,
      "learning_rate": 8.703215912608416e-06,
      "loss": 0.6222,
      "step": 660
    },
    {
      "epoch": 0.46,
      "learning_rate": 8.256522370162949e-06,
      "loss": 0.5987,
      "step": 670
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.817202721498955e-06,
      "loss": 0.6221,
      "step": 680
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.385737356202244e-06,
      "loss": 0.5991,
      "step": 690
    },
    {
      "epoch": 0.48,
      "learning_rate": 6.962598075315047e-06,
      "loss": 0.6494,
      "step": 700
    },
    {
      "epoch": 0.48,
      "eval_loss": 0.5806382894515991,
      "eval_runtime": 137.5354,
      "eval_samples_per_second": 4.726,
      "eval_steps_per_second": 1.185,
      "step": 700
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.5482475754285535e-06,
      "loss": 0.5733,
      "step": 710
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.143138942730943e-06,
      "loss": 0.6724,
      "step": 720
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.747715157564335e-06,
      "loss": 0.5736,
      "step": 730
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.362408610032257e-06,
      "loss": 0.6059,
      "step": 740
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.987640627187413e-06,
      "loss": 0.6259,
      "step": 750
    },
    {
      "epoch": 0.51,
      "eval_loss": 0.5767314434051514,
      "eval_runtime": 137.4798,
      "eval_samples_per_second": 4.728,
      "eval_steps_per_second": 1.186,
      "step": 750
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.623821012316761e-06,
      "loss": 0.5797,
      "step": 760
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.27134759682762e-06,
      "loss": 0.6227,
      "step": 770
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.930605805224858e-06,
      "loss": 0.6119,
      "step": 780
    },
    {
      "epoch": 0.54,
      "learning_rate": 3.6019682336548736e-06,
      "loss": 0.6289,
      "step": 790
    },
    {
      "epoch": 0.55,
      "learning_rate": 3.285794242477173e-06,
      "loss": 0.557,
      "step": 800
    },
    {
      "epoch": 0.55,
      "eval_loss": 0.5762439966201782,
      "eval_runtime": 137.5294,
      "eval_samples_per_second": 4.726,
      "eval_steps_per_second": 1.185,
      "step": 800
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.9824295633090864e-06,
      "loss": 0.536,
      "step": 810
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.692205920973333e-06,
      "loss": 0.662,
      "step": 820
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.4154406707617813e-06,
      "loss": 0.5556,
      "step": 830
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.1524364514121193e-06,
      "loss": 0.6678,
      "step": 840
    },
    {
      "epoch": 0.58,
      "learning_rate": 1.903480854176805e-06,
      "loss": 0.6215,
      "step": 850
    },
    {
      "epoch": 0.58,
      "eval_loss": 0.5777194499969482,
      "eval_runtime": 137.4572,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 1.186,
      "step": 850
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.6688461083462942e-06,
      "loss": 0.5598,
      "step": 860
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.4487887835702773e-06,
      "loss": 0.6472,
      "step": 870
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.2435495093025523e-06,
      "loss": 0.5749,
      "step": 880
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.0533527116762298e-06,
      "loss": 0.5959,
      "step": 890
    },
    {
      "epoch": 0.62,
      "learning_rate": 8.784063680970788e-07,
      "loss": 0.5986,
      "step": 900
    },
    {
      "epoch": 0.62,
      "eval_loss": 0.5770220160484314,
      "eval_runtime": 137.4371,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 1.186,
      "step": 900
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.189017798232672e-07,
      "loss": 0.5965,
      "step": 910
    },
    {
      "epoch": 0.63,
      "learning_rate": 5.75013362780244e-07,
      "loss": 0.5667,
      "step": 920
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.46898456839504e-07,
      "loss": 0.5984,
      "step": 930
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.346971537697263e-07,
      "loss": 0.6219,
      "step": 940
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.38532144048495e-07,
      "loss": 0.6224,
      "step": 950
    },
    {
      "epoch": 0.65,
      "eval_loss": 0.57674241065979,
      "eval_runtime": 137.5142,
      "eval_samples_per_second": 4.727,
      "eval_steps_per_second": 1.185,
      "step": 950
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5850858270205992e-07,
      "loss": 0.532,
      "step": 960
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.471397431985884e-08,
      "loss": 0.6332,
      "step": 970
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.721807736953576e-08,
      "loss": 0.5957,
      "step": 980
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.607282791707687e-08,
      "loss": 0.6052,
      "step": 990
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.3122828354905637e-09,
      "loss": 0.6058,
      "step": 1000
    },
    {
      "epoch": 0.68,
      "eval_loss": 0.5767720937728882,
      "eval_runtime": 137.4993,
      "eval_samples_per_second": 4.727,
      "eval_steps_per_second": 1.185,
      "step": 1000
    },
    {
      "epoch": 0.68,
      "step": 1000,
      "total_flos": 9.350756982954394e+16,
      "train_loss": 0.6960326323509216,
      "train_runtime": 5130.9272,
      "train_samples_per_second": 0.78,
      "train_steps_per_second": 0.195
    }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 9.350756982954394e+16,
  "trial_name": null,
  "trial_params": null
}