{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.57190357439734,
  "eval_steps": 500,
  "global_step": 11000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20781379883624274,
      "grad_norm": 0.7872959971427917,
      "learning_rate": 1.917206982543641e-05,
      "loss": 0.7834,
      "step": 500
    },
    {
      "epoch": 0.41562759767248547,
      "grad_norm": 0.9406130313873291,
      "learning_rate": 1.834081463009144e-05,
      "loss": 0.4895,
      "step": 1000
    },
    {
      "epoch": 0.6234413965087282,
      "grad_norm": 0.4806763529777527,
      "learning_rate": 1.7509559434746467e-05,
      "loss": 0.4473,
      "step": 1500
    },
    {
      "epoch": 0.8312551953449709,
      "grad_norm": 0.5443118214607239,
      "learning_rate": 1.6678304239401496e-05,
      "loss": 0.4326,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.33564111590385437,
      "eval_runtime": 26.7564,
      "eval_samples_per_second": 18.463,
      "eval_steps_per_second": 4.634,
      "step": 2406
    },
    {
      "epoch": 1.0390689941812137,
      "grad_norm": 0.7427258491516113,
      "learning_rate": 1.5847049044056525e-05,
      "loss": 0.4225,
      "step": 2500
    },
    {
      "epoch": 1.2468827930174564,
      "grad_norm": 0.4491533637046814,
      "learning_rate": 1.5015793848711555e-05,
      "loss": 0.3857,
      "step": 3000
    },
    {
      "epoch": 1.4546965918536992,
      "grad_norm": 0.4072982668876648,
      "learning_rate": 1.4184538653366584e-05,
      "loss": 0.3754,
      "step": 3500
    },
    {
      "epoch": 1.6625103906899419,
      "grad_norm": 0.8853089213371277,
      "learning_rate": 1.3353283458021613e-05,
      "loss": 0.3765,
      "step": 4000
    },
    {
      "epoch": 1.8703241895261846,
      "grad_norm": 0.59534752368927,
      "learning_rate": 1.2522028262676643e-05,
      "loss": 0.3677,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3049054443836212,
      "eval_runtime": 26.7561,
      "eval_samples_per_second": 18.463,
      "eval_steps_per_second": 4.634,
      "step": 4812
    },
    {
      "epoch": 2.0781379883624274,
      "grad_norm": 0.5839773416519165,
      "learning_rate": 1.1690773067331672e-05,
      "loss": 0.3488,
      "step": 5000
    },
    {
      "epoch": 2.28595178719867,
      "grad_norm": 0.8203754425048828,
      "learning_rate": 1.086118038237739e-05,
      "loss": 0.3526,
      "step": 5500
    },
    {
      "epoch": 2.493765586034913,
      "grad_norm": 0.7811100482940674,
      "learning_rate": 1.0029925187032419e-05,
      "loss": 0.3438,
      "step": 6000
    },
    {
      "epoch": 2.7015793848711556,
      "grad_norm": 0.823439359664917,
      "learning_rate": 9.19866999168745e-06,
      "loss": 0.3406,
      "step": 6500
    },
    {
      "epoch": 2.9093931837073983,
      "grad_norm": 0.5342724323272705,
      "learning_rate": 8.367414796342478e-06,
      "loss": 0.3333,
      "step": 7000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.2934778034687042,
      "eval_runtime": 26.7608,
      "eval_samples_per_second": 18.46,
      "eval_steps_per_second": 4.634,
      "step": 7218
    },
    {
      "epoch": 3.117206982543641,
      "grad_norm": 0.4342059791088104,
      "learning_rate": 7.536159600997507e-06,
      "loss": 0.3313,
      "step": 7500
    },
    {
      "epoch": 3.3250207813798838,
      "grad_norm": 0.9092524647712708,
      "learning_rate": 6.704904405652536e-06,
      "loss": 0.3265,
      "step": 8000
    },
    {
      "epoch": 3.5328345802161265,
      "grad_norm": 0.7057175040245056,
      "learning_rate": 5.873649210307565e-06,
      "loss": 0.3219,
      "step": 8500
    },
    {
      "epoch": 3.7406483790523692,
      "grad_norm": 0.8144080638885498,
      "learning_rate": 5.0423940149625935e-06,
      "loss": 0.316,
      "step": 9000
    },
    {
      "epoch": 3.9484621778886115,
      "grad_norm": 0.5943393707275391,
      "learning_rate": 4.212801330008313e-06,
      "loss": 0.3155,
      "step": 9500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.2871634364128113,
      "eval_runtime": 26.8766,
      "eval_samples_per_second": 18.38,
      "eval_steps_per_second": 4.614,
      "step": 9624
    },
    {
      "epoch": 4.156275976724855,
      "grad_norm": 1.265112280845642,
      "learning_rate": 3.381546134663342e-06,
      "loss": 0.3036,
      "step": 10000
    },
    {
      "epoch": 4.364089775561097,
      "grad_norm": 0.7941420078277588,
      "learning_rate": 2.550290939318371e-06,
      "loss": 0.3129,
      "step": 10500
    },
    {
      "epoch": 4.57190357439734,
      "grad_norm": 0.956358015537262,
      "learning_rate": 1.7190357439733998e-06,
      "loss": 0.3166,
      "step": 11000
    }
  ],
  "logging_steps": 500,
  "max_steps": 12030,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.679171135307776e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}