{
  "best_global_step": 10000,
  "best_metric": 0.45208191871643066,
  "best_model_checkpoint": "codebert_base_code_uml_c/checkpoint-10000",
  "epoch": 5.0,
  "eval_steps": 10000,
  "global_step": 11470,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21795989537925023,
      "grad_norm": 1.2200626134872437,
      "learning_rate": 4.9900000000000005e-06,
      "loss": 0.9993,
      "step": 500
    },
    {
      "epoch": 0.43591979075850046,
      "grad_norm": 1.1631908416748047,
      "learning_rate": 9.990000000000001e-06,
      "loss": 0.767,
      "step": 1000
    },
    {
      "epoch": 0.6538796861377506,
      "grad_norm": 0.973538339138031,
      "learning_rate": 1.499e-05,
      "loss": 0.7035,
      "step": 1500
    },
    {
      "epoch": 0.8718395815170009,
      "grad_norm": 0.8392261862754822,
      "learning_rate": 1.999e-05,
      "loss": 0.6682,
      "step": 2000
    },
    {
      "epoch": 1.0897994768962511,
      "grad_norm": 0.8580324053764343,
      "learning_rate": 2.4990000000000003e-05,
      "loss": 0.6422,
      "step": 2500
    },
    {
      "epoch": 1.3077593722755014,
      "grad_norm": 0.911376416683197,
      "learning_rate": 2.9990000000000003e-05,
      "loss": 0.6221,
      "step": 3000
    },
    {
      "epoch": 1.5257192676547515,
      "grad_norm": 0.821864902973175,
      "learning_rate": 3.499e-05,
      "loss": 0.6094,
      "step": 3500
    },
    {
      "epoch": 1.7436791630340016,
      "grad_norm": 0.8506414890289307,
      "learning_rate": 3.999e-05,
      "loss": 0.5945,
      "step": 4000
    },
    {
      "epoch": 1.961639058413252,
      "grad_norm": 0.7997013330459595,
      "learning_rate": 4.499e-05,
      "loss": 0.5884,
      "step": 4500
    },
    {
      "epoch": 2.1795989537925022,
      "grad_norm": 0.8773174285888672,
      "learning_rate": 4.999e-05,
      "loss": 0.5753,
      "step": 5000
    },
    {
      "epoch": 2.3975588491717525,
      "grad_norm": 0.7826923131942749,
      "learning_rate": 5.499000000000001e-05,
      "loss": 0.5712,
      "step": 5500
    },
    {
      "epoch": 2.615518744551003,
      "grad_norm": 0.7175124287605286,
      "learning_rate": 5.999e-05,
      "loss": 0.5602,
      "step": 6000
    },
    {
      "epoch": 2.8334786399302527,
      "grad_norm": 0.6982043981552124,
      "learning_rate": 6.499000000000001e-05,
      "loss": 0.5554,
      "step": 6500
    },
    {
      "epoch": 3.051438535309503,
      "grad_norm": 0.6505544781684875,
      "learning_rate": 6.999e-05,
      "loss": 0.5491,
      "step": 7000
    },
    {
      "epoch": 3.2693984306887534,
      "grad_norm": 0.7241908311843872,
      "learning_rate": 7.499e-05,
      "loss": 0.5435,
      "step": 7500
    },
    {
      "epoch": 3.4873583260680037,
      "grad_norm": 0.7068212032318115,
      "learning_rate": 7.999000000000001e-05,
      "loss": 0.5433,
      "step": 8000
    },
    {
      "epoch": 3.7053182214472535,
      "grad_norm": 0.7010810375213623,
      "learning_rate": 8.499e-05,
      "loss": 0.5385,
      "step": 8500
    },
    {
      "epoch": 3.923278116826504,
      "grad_norm": 0.685562252998352,
      "learning_rate": 8.999000000000001e-05,
      "loss": 0.534,
      "step": 9000
    },
    {
      "epoch": 4.141238012205754,
      "grad_norm": 0.700875997543335,
      "learning_rate": 9.499e-05,
      "loss": 0.5343,
      "step": 9500
    },
    {
      "epoch": 4.3591979075850045,
      "grad_norm": 0.7298157811164856,
      "learning_rate": 9.999000000000001e-05,
      "loss": 0.5254,
      "step": 10000
    },
    {
      "epoch": 4.3591979075850045,
      "eval_accuracy": 0.9053344805784795,
      "eval_loss": 0.45208191871643066,
      "eval_runtime": 83.4945,
      "eval_samples_per_second": 149.124,
      "eval_steps_per_second": 1.557,
      "step": 10000
    },
    {
      "epoch": 4.577157802964255,
      "grad_norm": 0.677169680595398,
      "learning_rate": 6.605442176870749e-05,
      "loss": 0.5214,
      "step": 10500
    },
    {
      "epoch": 4.795117698343505,
      "grad_norm": 0.7635814547538757,
      "learning_rate": 3.2040816326530615e-05,
      "loss": 0.5126,
      "step": 11000
    },
    {
      "epoch": 5.0,
      "step": 11470,
      "total_flos": 2.898276322152499e+17,
      "train_loss": 0.5986486167209296,
      "train_runtime": 10447.1842,
      "train_samples_per_second": 105.377,
      "train_steps_per_second": 1.098
    }
  ],
  "logging_steps": 500,
  "max_steps": 11470,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.898276322152499e+17,
  "train_batch_size": 96,
  "trial_name": null,
  "trial_params": null
}