Training in progress, step 500
Files changed:

- .ipynb_checkpoints/lora_orpo-checkpoint.yaml +43 -0
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- lora_orpo.yaml +43 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- trainer_log.jsonl +51 -0
- training_args.bin +3 -0
    	
.ipynb_checkpoints/lora_orpo-checkpoint.yaml
ADDED

@@ -0,0 +1,43 @@
+### model
+model_name_or_path: mistralai/Mistral-7B-Instruct-v0.3
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en
+dataset_dir: data
+template: mistral
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Mistral-7B-Instruct-v0.3/lora/orpo
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Mistral-7B-Instruct-v0.3-ORPO
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
    	
adapter_config.json
ADDED

@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.3",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "up_proj",
+    "o_proj",
+    "v_proj",
+    "q_proj",
+    "down_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
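The config above records a rank-8 LoRA (lora_alpha 16, no dropout) injected into all seven attention and MLP projections of the base model. A minimal inference sketch with the peft library, assuming the adapter files in this commit are available under the hub_model_id named in the training config:

```python
# Minimal sketch, assuming peft/transformers are installed and the adapter
# repo below (taken from hub_model_id in lora_orpo.yaml) is accessible.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.3",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# PeftModel reads adapter_config.json to rebuild the rank-8 LoRA layers,
# then loads their weights from adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "chchen/Mistral-7B-Instruct-v0.3-ORPO")
tokenizer = AutoTokenizer.from_pretrained("chchen/Mistral-7B-Instruct-v0.3-ORPO")
```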
    	
adapter_model.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a8acfbf35a8a366284f3658253aebf69472fe237cef71ffcef54976eb0d738
+size 83945296
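The 83,945,296-byte size is consistent with the adapter config: a rank-8 LoRA over every listed projection of a 32-layer Mistral-7B, stored in fp32. A back-of-the-envelope check, where the layer shapes are the published Mistral-7B dimensions (an assumption, not read from this repo):

```python
# Expected LoRA parameter count for r=8 over all target_modules.
# (d_in, d_out) per projection; published Mistral-7B shapes (hidden 4096,
# 8 KV heads -> 1024-dim k/v, MLP width 14336) -- assumed here.
r, n_layers = 8, 32
shapes = {
    "q_proj": (4096, 4096), "k_proj": (4096, 1024), "v_proj": (4096, 1024),
    "o_proj": (4096, 4096), "gate_proj": (4096, 14336),
    "up_proj": (4096, 14336), "down_proj": (14336, 4096),
}
# Each LoRA pair adds A (r x d_in) plus B (d_out x r) parameters.
per_layer = sum(r * (d_in + d_out) for d_in, d_out in shapes.values())
total = per_layer * n_layers
print(total)      # 20971520 trainable parameters
print(total * 4)  # 83886080 bytes in fp32; with the safetensors header this
                  # lands near the 83945296-byte file above
```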
    	
lora_orpo.yaml
ADDED

@@ -0,0 +1,43 @@
+### model
+model_name_or_path: mistralai/Mistral-7B-Instruct-v0.3
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en
+dataset_dir: data
+template: mistral
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Mistral-7B-Instruct-v0.3/lora/orpo
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Mistral-7B-Instruct-v0.3-ORPO
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
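This is a LLaMA-Factory-style config (the stage/finetuning_type/template keys), presumably launched with something like `llamafactory-cli train lora_orpo.yaml`. The step count in the trainer log below follows from these hyperparameters; a quick check, assuming a single GPU and a 10k-pair dpo_mix_en split so that val_size: 0.1 leaves 9,000 training pairs (both assumptions, not read from this commit):

```python
# Sanity-check total_steps in trainer_log.jsonl from the config above.
train_samples = 9000        # assumed: 10k dpo_mix_en pairs, 10% held out
per_device_bs = 2           # per_device_train_batch_size
grad_accum = 8              # gradient_accumulation_steps
epochs = 3                  # num_train_epochs

batches_per_epoch = train_samples // per_device_bs        # 4500
updates_per_epoch = batches_per_epoch // grad_accum       # 562
print(updates_per_epoch * epochs)   # 1686 == total_steps in the log
print(per_device_bs * grad_accum)   # effective batch size of 16
```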
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
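Since the Mistral tokenizer ships no dedicated padding token, the map reuses the EOS token `</s>` for padding. A quick check, assuming the tokenizer files in this commit were pushed to the hub repo named in the training config:

```python
from transformers import AutoTokenizer

# Repo id taken from hub_model_id in lora_orpo.yaml -- an assumption.
tok = AutoTokenizer.from_pretrained("chchen/Mistral-7B-Instruct-v0.3-ORPO")
print(tok.pad_token, tok.eos_token)          # </s> </s>
print(tok.pad_token_id == tok.eos_token_id)  # True
```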
    	
tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
tokenizer.model
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
+size 587404
    	
tokenizer_config.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
trainer_log.jsonl
ADDED

@@ -0,0 +1,51 @@
+{"current_steps": 10, "total_steps": 1686, "loss": 1.0773, "accuracy": 0.518750011920929, "learning_rate": 4.9995745934141085e-06, "epoch": 0.017781729273171815, "percentage": 0.59, "elapsed_time": "0:02:57", "remaining_time": "8:15:19"}
+{"current_steps": 20, "total_steps": 1686, "loss": 1.1014, "accuracy": 0.5687500238418579, "learning_rate": 4.9982812903243405e-06, "epoch": 0.03556345854634363, "percentage": 1.19, "elapsed_time": "0:05:56", "remaining_time": "8:15:27"}
+{"current_steps": 30, "total_steps": 1686, "loss": 1.0616, "accuracy": 0.6000000238418579, "learning_rate": 4.996120496405222e-06, "epoch": 0.05334518781951545, "percentage": 1.78, "elapsed_time": "0:08:53", "remaining_time": "8:11:16"}
+{"current_steps": 40, "total_steps": 1686, "loss": 1.1331, "accuracy": 0.5375000238418579, "learning_rate": 4.99309296196014e-06, "epoch": 0.07112691709268726, "percentage": 2.37, "elapsed_time": "0:11:49", "remaining_time": "8:06:42"}
+{"current_steps": 50, "total_steps": 1686, "loss": 0.9797, "accuracy": 0.6499999761581421, "learning_rate": 4.989199738255166e-06, "epoch": 0.08890864636585907, "percentage": 2.97, "elapsed_time": "0:14:49", "remaining_time": "8:05:17"}
+{"current_steps": 60, "total_steps": 1686, "loss": 1.0632, "accuracy": 0.550000011920929, "learning_rate": 4.984442177154031e-06, "epoch": 0.1066903756390309, "percentage": 3.56, "elapsed_time": "0:17:54", "remaining_time": "8:05:05"}
+{"current_steps": 70, "total_steps": 1686, "loss": 1.0027, "accuracy": 0.5562499761581421, "learning_rate": 4.978821930648704e-06, "epoch": 0.12447210491220272, "percentage": 4.15, "elapsed_time": "0:21:00", "remaining_time": "8:04:48"}
+{"current_steps": 80, "total_steps": 1686, "loss": 0.9964, "accuracy": 0.550000011920929, "learning_rate": 4.97234095028576e-06, "epoch": 0.14225383418537452, "percentage": 4.74, "elapsed_time": "0:24:06", "remaining_time": "8:03:55"}
+{"current_steps": 90, "total_steps": 1686, "loss": 0.9472, "accuracy": 0.606249988079071, "learning_rate": 4.965001486488743e-06, "epoch": 0.16003556345854633, "percentage": 5.34, "elapsed_time": "0:27:08", "remaining_time": "8:01:18"}
+{"current_steps": 100, "total_steps": 1686, "loss": 0.9857, "accuracy": 0.5874999761581421, "learning_rate": 4.956806087776732e-06, "epoch": 0.17781729273171815, "percentage": 5.93, "elapsed_time": "0:30:02", "remaining_time": "7:56:34"}
+{"current_steps": 110, "total_steps": 1686, "loss": 1.0259, "accuracy": 0.581250011920929, "learning_rate": 4.947757599879411e-06, "epoch": 0.19559902200489, "percentage": 6.52, "elapsed_time": "0:32:54", "remaining_time": "7:51:29"}
+{"current_steps": 120, "total_steps": 1686, "loss": 0.9473, "accuracy": 0.5687500238418579, "learning_rate": 4.937859164748931e-06, "epoch": 0.2133807512780618, "percentage": 7.12, "elapsed_time": "0:35:46", "remaining_time": "7:46:52"}
+{"current_steps": 130, "total_steps": 1686, "loss": 0.9558, "accuracy": 0.5562499761581421, "learning_rate": 4.92711421946891e-06, "epoch": 0.23116248055123362, "percentage": 7.71, "elapsed_time": "0:38:56", "remaining_time": "7:46:08"}
+{"current_steps": 140, "total_steps": 1686, "loss": 0.9238, "accuracy": 0.5687500238418579, "learning_rate": 4.915526495060961e-06, "epoch": 0.24894420982440543, "percentage": 8.3, "elapsed_time": "0:42:00", "remaining_time": "7:43:51"}
+{"current_steps": 150, "total_steps": 1686, "loss": 0.9062, "accuracy": 0.606249988079071, "learning_rate": 4.903100015189153e-06, "epoch": 0.26672593909757725, "percentage": 8.9, "elapsed_time": "0:45:03", "remaining_time": "7:41:23"}
+{"current_steps": 160, "total_steps": 1686, "loss": 0.952, "accuracy": 0.5687500238418579, "learning_rate": 4.889839094762848e-06, "epoch": 0.28450766837074903, "percentage": 9.49, "elapsed_time": "0:47:59", "remaining_time": "7:37:38"}
+{"current_steps": 170, "total_steps": 1686, "loss": 0.9602, "accuracy": 0.543749988079071, "learning_rate": 4.875748338438416e-06, "epoch": 0.3022893976439209, "percentage": 10.08, "elapsed_time": "0:50:53", "remaining_time": "7:33:52"}
+{"current_steps": 180, "total_steps": 1686, "loss": 0.9188, "accuracy": 0.5687500238418579, "learning_rate": 4.8608326390203386e-06, "epoch": 0.32007112691709266, "percentage": 10.68, "elapsed_time": "0:53:54", "remaining_time": "7:31:00"}
+{"current_steps": 190, "total_steps": 1686, "loss": 0.9743, "accuracy": 0.543749988079071, "learning_rate": 4.845097175762251e-06, "epoch": 0.3378528561902645, "percentage": 11.27, "elapsed_time": "0:56:54", "remaining_time": "7:28:06"}
+{"current_steps": 200, "total_steps": 1686, "loss": 0.9757, "accuracy": 0.53125, "learning_rate": 4.8285474125685286e-06, "epoch": 0.3556345854634363, "percentage": 11.86, "elapsed_time": "0:59:51", "remaining_time": "7:24:41"}
+{"current_steps": 210, "total_steps": 1686, "loss": 0.9473, "accuracy": 0.5562499761581421, "learning_rate": 4.811189096097025e-06, "epoch": 0.37341631473660813, "percentage": 12.46, "elapsed_time": "1:03:03", "remaining_time": "7:23:14"}
+{"current_steps": 220, "total_steps": 1686, "loss": 0.9507, "accuracy": 0.581250011920929, "learning_rate": 4.793028253763633e-06, "epoch": 0.39119804400978, "percentage": 13.05, "elapsed_time": "1:06:04", "remaining_time": "7:20:21"}
+{"current_steps": 230, "total_steps": 1686, "loss": 0.9107, "accuracy": 0.581250011920929, "learning_rate": 4.774071191649352e-06, "epoch": 0.40897977328295176, "percentage": 13.64, "elapsed_time": "1:09:09", "remaining_time": "7:17:50"}
+{"current_steps": 240, "total_steps": 1686, "loss": 0.9829, "accuracy": 0.518750011920929, "learning_rate": 4.7543244923105975e-06, "epoch": 0.4267615025561236, "percentage": 14.23, "elapsed_time": "1:12:12", "remaining_time": "7:15:01"}
+{"current_steps": 250, "total_steps": 1686, "loss": 0.9739, "accuracy": 0.550000011920929, "learning_rate": 4.733795012493506e-06, "epoch": 0.4445432318292954, "percentage": 14.83, "elapsed_time": "1:15:16", "remaining_time": "7:12:20"}
+{"current_steps": 260, "total_steps": 1686, "loss": 0.8776, "accuracy": 0.5625, "learning_rate": 4.712489880753035e-06, "epoch": 0.46232496110246724, "percentage": 15.42, "elapsed_time": "1:18:24", "remaining_time": "7:10:02"}
+{"current_steps": 270, "total_steps": 1686, "loss": 0.8925, "accuracy": 0.6000000238418579, "learning_rate": 4.690416494977673e-06, "epoch": 0.480106690375639, "percentage": 16.01, "elapsed_time": "1:21:25", "remaining_time": "7:07:03"}
+{"current_steps": 280, "total_steps": 1686, "loss": 1.018, "accuracy": 0.5687500238418579, "learning_rate": 4.667582519820639e-06, "epoch": 0.49788841964881086, "percentage": 16.61, "elapsed_time": "1:24:37", "remaining_time": "7:04:55"}
+{"current_steps": 290, "total_steps": 1686, "loss": 0.9404, "accuracy": 0.5562499761581421, "learning_rate": 4.643995884038443e-06, "epoch": 0.5156701489219827, "percentage": 17.2, "elapsed_time": "1:27:39", "remaining_time": "7:01:59"}
+{"current_steps": 300, "total_steps": 1686, "loss": 0.9176, "accuracy": 0.543749988079071, "learning_rate": 4.6196647777377475e-06, "epoch": 0.5334518781951545, "percentage": 17.79, "elapsed_time": "1:30:45", "remaining_time": "6:59:18"}
+{"current_steps": 310, "total_steps": 1686, "loss": 0.9431, "accuracy": 0.518750011920929, "learning_rate": 4.59459764953147e-06, "epoch": 0.5512336074683263, "percentage": 18.39, "elapsed_time": "1:33:56", "remaining_time": "6:56:58"}
+{"current_steps": 320, "total_steps": 1686, "loss": 0.8969, "accuracy": 0.5687500238418579, "learning_rate": 4.568803203605133e-06, "epoch": 0.5690153367414981, "percentage": 18.98, "elapsed_time": "1:36:59", "remaining_time": "6:54:02"}
+{"current_steps": 330, "total_steps": 1686, "loss": 0.9255, "accuracy": 0.512499988079071, "learning_rate": 4.542290396694462e-06, "epoch": 0.58679706601467, "percentage": 19.57, "elapsed_time": "1:40:09", "remaining_time": "6:51:31"}
+{"current_steps": 340, "total_steps": 1686, "loss": 0.9376, "accuracy": 0.581250011920929, "learning_rate": 4.515068434975298e-06, "epoch": 0.6045787952878418, "percentage": 20.17, "elapsed_time": "1:43:09", "remaining_time": "6:48:24"}
+{"current_steps": 350, "total_steps": 1686, "loss": 0.9343, "accuracy": 0.53125, "learning_rate": 4.487146770866887e-06, "epoch": 0.6223605245610135, "percentage": 20.76, "elapsed_time": "1:46:12", "remaining_time": "6:45:25"}
+{"current_steps": 360, "total_steps": 1686, "loss": 1.0312, "accuracy": 0.53125, "learning_rate": 4.458535099749666e-06, "epoch": 0.6401422538341853, "percentage": 21.35, "elapsed_time": "1:49:11", "remaining_time": "6:42:10"}
+{"current_steps": 370, "total_steps": 1686, "loss": 0.9596, "accuracy": 0.550000011920929, "learning_rate": 4.429243356598694e-06, "epoch": 0.6579239831073572, "percentage": 21.95, "elapsed_time": "1:52:10", "remaining_time": "6:39:00"}
+{"current_steps": 380, "total_steps": 1686, "loss": 0.8862, "accuracy": 0.512499988079071, "learning_rate": 4.399281712533875e-06, "epoch": 0.675705712380529, "percentage": 22.54, "elapsed_time": "1:55:04", "remaining_time": "6:35:29"}
+{"current_steps": 390, "total_steps": 1686, "loss": 0.917, "accuracy": 0.4937500059604645, "learning_rate": 4.368660571288192e-06, "epoch": 0.6934874416537008, "percentage": 23.13, "elapsed_time": "1:58:14", "remaining_time": "6:32:54"}
+{"current_steps": 400, "total_steps": 1686, "loss": 1.0041, "accuracy": 0.48750001192092896, "learning_rate": 4.337390565595163e-06, "epoch": 0.7112691709268726, "percentage": 23.72, "elapsed_time": "2:01:15", "remaining_time": "6:29:51"}
+{"current_steps": 410, "total_steps": 1686, "loss": 0.868, "accuracy": 0.5687500238418579, "learning_rate": 4.305482553496786e-06, "epoch": 0.7290509002000445, "percentage": 24.32, "elapsed_time": "2:04:11", "remaining_time": "6:26:29"}
+{"current_steps": 420, "total_steps": 1686, "loss": 0.9551, "accuracy": 0.5874999761581421, "learning_rate": 4.272947614573244e-06, "epoch": 0.7468326294732163, "percentage": 24.91, "elapsed_time": "2:07:06", "remaining_time": "6:23:07"}
+{"current_steps": 430, "total_steps": 1686, "loss": 0.9024, "accuracy": 0.5687500238418579, "learning_rate": 4.23979704609569e-06, "epoch": 0.7646143587463881, "percentage": 25.5, "elapsed_time": "2:10:08", "remaining_time": "6:20:08"}
+{"current_steps": 440, "total_steps": 1686, "loss": 0.9355, "accuracy": 0.5375000238418579, "learning_rate": 4.206042359103435e-06, "epoch": 0.78239608801956, "percentage": 26.1, "elapsed_time": "2:13:11", "remaining_time": "6:17:09"}
+{"current_steps": 450, "total_steps": 1686, "loss": 0.9306, "accuracy": 0.46875, "learning_rate": 4.17169527440691e-06, "epoch": 0.8001778172927317, "percentage": 26.69, "elapsed_time": "2:16:06", "remaining_time": "6:13:50"}
+{"current_steps": 460, "total_steps": 1686, "loss": 0.8402, "accuracy": 0.5625, "learning_rate": 4.136767718517797e-06, "epoch": 0.8179595465659035, "percentage": 27.28, "elapsed_time": "2:19:07", "remaining_time": "6:10:48"}
+{"current_steps": 470, "total_steps": 1686, "loss": 0.9289, "accuracy": 0.4937500059604645, "learning_rate": 4.1012718195077196e-06, "epoch": 0.8357412758390753, "percentage": 27.88, "elapsed_time": "2:22:09", "remaining_time": "6:07:48"}
+{"current_steps": 480, "total_steps": 1686, "loss": 0.8996, "accuracy": 0.581250011920929, "learning_rate": 4.065219902796953e-06, "epoch": 0.8535230051122472, "percentage": 28.47, "elapsed_time": "2:25:04", "remaining_time": "6:04:29"}
+{"current_steps": 490, "total_steps": 1686, "loss": 0.8747, "accuracy": 0.581250011920929, "learning_rate": 4.028624486874608e-06, "epoch": 0.871304734385419, "percentage": 29.06, "elapsed_time": "2:28:05", "remaining_time": "6:01:28"}
+{"current_steps": 500, "total_steps": 1686, "loss": 0.9464, "accuracy": 0.5062500238418579, "learning_rate": 3.99149827895177e-06, "epoch": 0.8890864636585908, "percentage": 29.66, "elapsed_time": "2:31:12", "remaining_time": "5:58:39"}
+{"current_steps": 500, "total_steps": 1686, "eval_loss": 0.8918758630752563, "epoch": 0.8890864636585908, "percentage": 29.66, "elapsed_time": "2:37:01", "remaining_time": "6:12:27"}
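Each record logs the ORPO training loss, chosen-vs-rejected preference accuracy, and learning rate every 10 optimizer steps (logging_steps: 10), with one eval record at step 500 (eval_steps: 500). With plot_loss: true LLaMA-Factory writes its own loss curves, but a minimal sketch for plotting this file directly:

```python
# Minimal sketch: plot the training-loss curve from trainer_log.jsonl.
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" in rec:  # eval records carry eval_loss instead
            steps.append(rec["current_steps"])
            losses.append(rec["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("train loss")
plt.savefig("orpo_loss.png")
```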
    	
training_args.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:346457aadedbdd0c4ca7d318ce433488da82e0ffc0b5d5673bac6ee8814ec494
+size 5240
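training_args.bin is the pickled TrainingArguments object that the HF Trainer saves alongside each checkpoint, so it is inspected rather than diffed. A sketch, assuming transformers is installed (newer torch versions need weights_only=False to unpickle arbitrary objects):

```python
import torch

# Unpickles the saved TrainingArguments from this commit.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)
```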
