{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0778911060510112,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 2e-05,
      "loss": 3.1471,
      "step": 500
    },
    {
      "epoch": 0.05,
      "learning_rate": 4e-05,
      "loss": 2.6593,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 6e-05,
      "loss": 2.6154,
      "step": 1500
    },
    {
      "epoch": 0.11,
      "learning_rate": 8e-05,
      "loss": 2.5936,
      "step": 2000
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001,
      "loss": 2.5892,
      "step": 2500
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.777777777777778e-05,
      "loss": 2.5745,
      "step": 3000
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.555555555555557e-05,
      "loss": 2.5523,
      "step": 3500
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.333333333333334e-05,
      "loss": 2.5062,
      "step": 4000
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.111111111111112e-05,
      "loss": 2.5098,
      "step": 4500
    },
    {
      "epoch": 0.27,
      "learning_rate": 8.888888888888889e-05,
      "loss": 2.4883,
      "step": 5000
    },
    {
      "epoch": 0.27,
      "eval_accuracy": 0.524201837373072,
      "eval_loss": 2.5380380153656006,
      "eval_runtime": 2042.925,
      "eval_samples_per_second": 122.374,
      "eval_steps_per_second": 7.648,
      "step": 5000
    },
    {
      "epoch": 0.3,
      "learning_rate": 8.666666666666667e-05,
      "loss": 2.4751,
      "step": 5500
    },
    {
      "epoch": 0.32,
      "learning_rate": 8.444444444444444e-05,
      "loss": 2.4578,
      "step": 6000
    },
    {
      "epoch": 0.35,
      "learning_rate": 8.222222222222222e-05,
      "loss": 2.4484,
      "step": 6500
    },
    {
      "epoch": 0.38,
      "learning_rate": 8e-05,
      "loss": 2.4408,
      "step": 7000
    },
    {
      "epoch": 0.4,
      "learning_rate": 7.777777777777778e-05,
      "loss": 2.418,
      "step": 7500
    },
    {
      "epoch": 0.43,
      "learning_rate": 7.555555555555556e-05,
      "loss": 2.4162,
      "step": 8000
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.333333333333333e-05,
      "loss": 2.4067,
      "step": 8500
    },
    {
      "epoch": 0.49,
      "learning_rate": 7.111111111111112e-05,
      "loss": 2.3932,
      "step": 9000
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.88888888888889e-05,
      "loss": 2.3785,
      "step": 9500
    },
    {
      "epoch": 0.54,
      "learning_rate": 6.666666666666667e-05,
      "loss": 2.3628,
      "step": 10000
    },
    {
      "epoch": 0.54,
      "eval_accuracy": 0.530768888210296,
      "eval_loss": 2.4960896968841553,
      "eval_runtime": 2067.1205,
      "eval_samples_per_second": 120.941,
      "eval_steps_per_second": 7.559,
      "step": 10000
    },
    {
      "epoch": 0.57,
      "learning_rate": 6.444444444444446e-05,
      "loss": 2.3703,
      "step": 10500
    },
    {
      "epoch": 0.59,
      "learning_rate": 6.222222222222222e-05,
      "loss": 2.3593,
      "step": 11000
    },
    {
      "epoch": 0.62,
      "learning_rate": 6e-05,
      "loss": 2.3603,
      "step": 11500
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.7777777777777776e-05,
      "loss": 2.3447,
      "step": 12000
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.555555555555556e-05,
      "loss": 2.3431,
      "step": 12500
    },
    {
      "epoch": 0.7,
      "learning_rate": 5.333333333333333e-05,
      "loss": 2.332,
      "step": 13000
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.111111111111111e-05,
      "loss": 2.3199,
      "step": 13500
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.888888888888889e-05,
      "loss": 2.3178,
      "step": 14000
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.666666666666667e-05,
      "loss": 2.3043,
      "step": 14500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 2.3087,
      "step": 15000
    },
    {
      "epoch": 0.81,
      "eval_accuracy": 0.5384137066763606,
      "eval_loss": 2.4490230083465576,
      "eval_runtime": 2059.234,
      "eval_samples_per_second": 121.404,
      "eval_steps_per_second": 7.588,
      "step": 15000
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.222222222222222e-05,
      "loss": 2.2956,
      "step": 15500
    },
    {
      "epoch": 0.86,
      "learning_rate": 4e-05,
      "loss": 2.2908,
      "step": 16000
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.777777777777778e-05,
      "loss": 2.3026,
      "step": 16500
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.555555555555556e-05,
      "loss": 2.2831,
      "step": 17000
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.2937,
      "step": 17500
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.111111111111111e-05,
      "loss": 2.2769,
      "step": 18000
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 2.2593,
      "step": 18500
    },
    {
      "epoch": 1.02,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 2.245,
      "step": 19000
    },
    {
      "epoch": 1.05,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 2.2561,
      "step": 19500
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 2.2413,
      "step": 20000
    },
    {
      "epoch": 1.08,
      "eval_accuracy": 0.5411358684415605,
      "eval_loss": 2.435114860534668,
      "eval_runtime": 2054.406,
      "eval_samples_per_second": 121.69,
      "eval_steps_per_second": 7.606,
      "step": 20000
    }
  ],
  "max_steps": 25000,
  "num_train_epochs": 2,
  "total_flos": 1.505927324733604e+17,
  "trial_name": null,
  "trial_params": null
}