Upload checkpoint-100
- .gitattributes +1 -0
- added_tokens.json +3 -0
- config.json +60 -0
- generation_config.json +13 -0
- latest +1 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +891 -0
- preprocessor_config.json +29 -0
- processor_config.json +4 -0
- rng_state_0.pth +3 -0
- rng_state_1.pth +3 -0
- rng_state_2.pth +3 -0
- rng_state_3.pth +3 -0
- rng_state_4.pth +3 -0
- rng_state_5.pth +3 -0
- rng_state_6.pth +3 -0
- rng_state_7.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +42 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- trainer_state.json +741 -0
- training_args.bin +3 -0
- zero_to_fp32.py +760 -0
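
This commit adds a full Trainer/DeepSpeed checkpoint: the two model shards and their index, tokenizer and processor files, RNG state for eight data-parallel ranks, the LR scheduler state, trainer_state.json, training_args.bin, and the DeepSpeed consolidation script zero_to_fp32.py. A minimal sketch for fetching just this checkpoint folder from the Hub; the repository id, revision, and folder prefix are assumptions, not given by the commit page.

```python
# Hypothetical sketch: download only the checkpoint-100 files from the repo this
# commit belongs to. repo_id, revision, and the folder layout are assumptions.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="your-org/your-gemma3-finetune",  # placeholder, not in the commit page
    revision="main",                          # or the hash of this upload commit
    allow_patterns=["checkpoint-100/*"],      # assumes files live under checkpoint-100/
)
print("checkpoint downloaded to", local_dir)
```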
    	
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
    	
added_tokens.json
ADDED

@@ -0,0 +1,3 @@
{
  "<image_soft_token>": 262144
}
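
added_tokens.json registers the image placeholder token at id 262144, matching image_token_index in config.json below. A small sketch, assuming a local copy of the checkpoint directory, that checks the mapping through the saved tokenizer:

```python
# Hedged sketch: confirm the added image token id using the tokenizer files in
# this checkpoint. "checkpoint-100" is an assumed local path.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-100")
print(tok.convert_tokens_to_ids("<image_soft_token>"))  # expected: 262144
```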
    	
config.json
ADDED

@@ -0,0 +1,60 @@
{
  "architectures": [
    "Gemma3ForConditionalGeneration"
  ],
  "boi_token_index": 255999,
  "eoi_token_index": 256000,
  "hidden_size": 2560,
  "image_token_index": 262144,
  "initializer_range": 0.02,
  "mm_tokens_per_image": 256,
  "model_type": "gemma3",
  "text_config": {
    "attention_bias": false,
    "attention_dropout": 0.0,
    "attn_logit_softcapping": null,
    "cache_implementation": "hybrid",
    "final_logit_softcapping": null,
    "head_dim": 256,
    "hidden_activation": "gelu_pytorch_tanh",
    "hidden_size": 2560,
    "initializer_range": 0.02,
    "intermediate_size": 10240,
    "max_position_embeddings": 131072,
    "model_type": "gemma3_text",
    "num_attention_heads": 8,
    "num_hidden_layers": 34,
    "num_key_value_heads": 4,
    "query_pre_attn_scalar": 256,
    "rms_norm_eps": 1e-06,
    "rope_local_base_freq": 10000.0,
    "rope_scaling": {
      "factor": 8.0,
      "rope_type": "linear"
    },
    "rope_theta": 1000000.0,
    "sliding_window": 1024,
    "sliding_window_pattern": 6,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "vocab_size": 262208
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.50.0.dev0",
  "use_cache": false,
  "vision_config": {
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 896,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "patch_size": 14,
    "torch_dtype": "bfloat16",
    "vision_use_head": false
  }
}
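
config.json describes a Gemma 3 conditional-generation model: a 34-layer, hidden-size-2560 text decoder (8 query heads, 4 KV heads, hybrid local/global attention with a sliding window of 1024) paired with a SigLIP vision tower, with 256 image tokens per image inserted at image_token_index 262144. A minimal loading sketch, assuming transformers >= 4.50 (the version recorded above) and a local copy of the checkpoint:

```python
# Hedged sketch: instantiate the model from this checkpoint's config and shards.
# "checkpoint-100" is an assumed local path; requires transformers >= 4.50.
import torch
from transformers import AutoConfig, Gemma3ForConditionalGeneration

config = AutoConfig.from_pretrained("checkpoint-100")
print(config.model_type, config.text_config.num_hidden_layers)  # gemma3, 34

model = Gemma3ForConditionalGeneration.from_pretrained(
    "checkpoint-100", torch_dtype=torch.bfloat16
)
```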
    	
generation_config.json
ADDED

@@ -0,0 +1,13 @@
{
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "do_sample": true,
  "eos_token_id": [
    1,
    106
  ],
  "pad_token_id": 0,
  "top_k": 64,
  "top_p": 0.95,
  "transformers_version": "4.50.0.dev0"
}
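
generation_config.json sets the sampling defaults (do_sample with top_k 64 and top_p 0.95) and two EOS ids (1 and 106). These are picked up automatically by model.generate(); a hedged sketch of loading and overriding them explicitly:

```python
# Hedged sketch: load the saved generation defaults and adjust them at call time.
# "checkpoint-100" is an assumed local path.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("checkpoint-100")
print(gen_cfg.do_sample, gen_cfg.top_k, gen_cfg.top_p)  # True 64 0.95

# outputs = model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=128)
```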
    	
latest
ADDED

@@ -0,0 +1 @@
global_step100
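
latest is DeepSpeed's pointer to the most recent ZeRO checkpoint tag (global_step100), which the bundled zero_to_fp32.py consolidates from the per-rank shards. A hedged sketch using the equivalent DeepSpeed helper; the paths are assumptions:

```python
# Hedged sketch: rebuild a single fp32 state dict from the ZeRO shards under
# checkpoint-100/global_step100. Requires deepspeed; paths are assumptions.
# The checkpoint's own script can be run instead, roughly:
#   python zero_to_fp32.py checkpoint-100 <output>
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-100", tag="global_step100")
print(f"{len(state_dict)} tensors consolidated")
```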
    	
model-00001-of-00002.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96020e1cd67ab01acb6756b4be29db00aff82d8062bad150f941ec6d93d06b68
size 4961251752
    	
model-00002-of-00002.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:033c7d731e9a719efc11920dc4122a9aeee2f15b84765148d03718dca62c8785
size 4981531360
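
The two .safetensors entries are git-lfs pointer files (oid plus size, roughly 4.96 GB and 4.98 GB); the real shards live in LFS storage, and the index file that follows maps every tensor name to one of the two shards. A hedged sketch of reading one tensor directly from its shard once the actual files are present locally:

```python
# Hedged sketch: use the weight map to find which shard holds a tensor, then read
# only that tensor. Paths are assumptions; requires the real LFS files, not pointers.
import json
from safetensors import safe_open

with open("checkpoint-100/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "language_model.model.embed_tokens.weight"
shard = weight_map[name]  # e.g. model-00001-of-00002.safetensors
with safe_open(f"checkpoint-100/{shard}", framework="pt") as fh:
    print(name, tuple(fh.get_tensor(name).shape))
```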
    	
model.safetensors.index.json
ADDED

@@ -0,0 +1,891 @@
{
  "metadata": {
    "total_size": 9942663904
  },
  "weight_map": {
    "language_model.lm_head.weight": "model-00002-of-00002.safetensors",
    "language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 318 | 
            +
                "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 319 | 
            +
                "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 320 | 
            +
                "language_model.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 321 | 
            +
                "language_model.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 322 | 
            +
                "language_model.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 323 | 
            +
                "language_model.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 324 | 
            +
                "language_model.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 325 | 
            +
                "language_model.model.layers.30.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 326 | 
            +
                "language_model.model.layers.30.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 327 | 
            +
                "language_model.model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 328 | 
            +
                "language_model.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 329 | 
            +
                "language_model.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 330 | 
            +
                "language_model.model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 331 | 
            +
                "language_model.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 332 | 
            +
                "language_model.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 333 | 
            +
                "language_model.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 334 | 
            +
                "language_model.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 335 | 
            +
                "language_model.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 336 | 
            +
                "language_model.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 337 | 
            +
                "language_model.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 338 | 
            +
                "language_model.model.layers.31.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 339 | 
            +
                "language_model.model.layers.31.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 340 | 
            +
                "language_model.model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 341 | 
            +
                "language_model.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 342 | 
            +
                "language_model.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 343 | 
            +
                "language_model.model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 344 | 
            +
                "language_model.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 345 | 
            +
                "language_model.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 346 | 
            +
                "language_model.model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 347 | 
            +
                "language_model.model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 348 | 
            +
                "language_model.model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 349 | 
            +
                "language_model.model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 350 | 
            +
                "language_model.model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 351 | 
            +
                "language_model.model.layers.32.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 352 | 
            +
                "language_model.model.layers.32.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 353 | 
            +
                "language_model.model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 354 | 
            +
                "language_model.model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 355 | 
            +
                "language_model.model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 356 | 
            +
                "language_model.model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 357 | 
            +
                "language_model.model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 358 | 
            +
                "language_model.model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 359 | 
            +
                "language_model.model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 360 | 
            +
                "language_model.model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 361 | 
            +
                "language_model.model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 362 | 
            +
                "language_model.model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 363 | 
            +
                "language_model.model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 364 | 
            +
                "language_model.model.layers.33.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 365 | 
            +
                "language_model.model.layers.33.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
         | 
| 366 | 
            +
                "language_model.model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 367 | 
            +
                "language_model.model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 368 | 
            +
                "language_model.model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 369 | 
            +
                "language_model.model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
         | 
| 370 | 
            +
                "language_model.model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 371 | 
            +
                "language_model.model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         | 
| 372 | 
            +
                "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 373 | 
            +
                "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 374 | 
            +
                "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 375 | 
            +
                "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 376 | 
            +
                "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 377 | 
            +
                "language_model.model.layers.4.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 378 | 
            +
                "language_model.model.layers.4.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 379 | 
            +
                "language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 380 | 
            +
                "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 381 | 
            +
                "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 382 | 
            +
                "language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 383 | 
            +
                "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 384 | 
            +
                "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 385 | 
            +
                "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 386 | 
            +
                "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 387 | 
            +
                "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 388 | 
            +
                "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 389 | 
            +
                "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 390 | 
            +
                "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 391 | 
            +
                "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 392 | 
            +
                "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 393 | 
            +
                "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 394 | 
            +
                "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 395 | 
            +
                "language_model.model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 396 | 
            +
                "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 397 | 
            +
                "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 398 | 
            +
                "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 399 | 
            +
                "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 400 | 
            +
                "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 401 | 
            +
                "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 402 | 
            +
                "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 403 | 
            +
                "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 404 | 
            +
                "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 405 | 
            +
                "language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 406 | 
            +
                "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 407 | 
            +
                "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 408 | 
            +
                "language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 409 | 
            +
                "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 410 | 
            +
                "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 411 | 
            +
                "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 412 | 
            +
                "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 413 | 
            +
                "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 414 | 
            +
                "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 415 | 
            +
                "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 416 | 
            +
                "language_model.model.layers.7.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 417 | 
            +
                "language_model.model.layers.7.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 418 | 
            +
                "language_model.model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 419 | 
            +
                "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 420 | 
            +
                "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 421 | 
            +
                "language_model.model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 422 | 
            +
                "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 423 | 
            +
                "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 424 | 
            +
                "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 425 | 
            +
                "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 426 | 
            +
                "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 427 | 
            +
                "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 428 | 
            +
                "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 429 | 
            +
                "language_model.model.layers.8.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 430 | 
            +
                "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 431 | 
            +
                "language_model.model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 432 | 
            +
                "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 433 | 
            +
                "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 434 | 
            +
                "language_model.model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 435 | 
            +
                "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 436 | 
            +
                "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 437 | 
            +
                "language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 438 | 
            +
                "language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 439 | 
            +
                "language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 440 | 
            +
                "language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 441 | 
            +
                "language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 442 | 
            +
                "language_model.model.layers.9.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 443 | 
            +
                "language_model.model.layers.9.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
         | 
| 444 | 
            +
                "language_model.model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 445 | 
            +
                "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 446 | 
            +
                "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 447 | 
            +
                "language_model.model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 448 | 
            +
                "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 449 | 
            +
                "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 450 | 
            +
                "language_model.model.norm.weight": "model-00002-of-00002.safetensors",
         | 
| 451 | 
            +
                "multi_modal_projector.mm_input_projection_weight": "model-00001-of-00002.safetensors",
         | 
| 452 | 
            +
                "multi_modal_projector.mm_soft_emb_norm.weight": "model-00001-of-00002.safetensors",
         | 
| 453 | 
            +
                "vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00002.safetensors",
         | 
| 454 | 
            +
                "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00002.safetensors",
         | 
| 455 | 
            +
                "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00002.safetensors",
         | 
| 456 | 
            +
                "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 457 | 
            +
                "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 458 | 
            +
                "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 459 | 
            +
                "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 460 | 
            +
                "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 461 | 
            +
                "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 462 | 
            +
                "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 463 | 
            +
                "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 464 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 465 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 466 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 467 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 468 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 469 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 470 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 471 | 
            +
                "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 472 | 
            +
                "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 473 | 
            +
                "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 474 | 
            +
                "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 475 | 
            +
                "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 476 | 
            +
                "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 477 | 
            +
                "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 478 | 
            +
                "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 479 | 
            +
                "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 480 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 481 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 482 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 483 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 484 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 485 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 486 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 487 | 
            +
                "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 488 | 
            +
                "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 489 | 
            +
                "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 490 | 
            +
                "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 491 | 
            +
                "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 492 | 
            +
                "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 493 | 
            +
                "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 494 | 
            +
                "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 495 | 
            +
                "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 496 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 497 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 498 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 499 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 500 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 501 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 502 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 503 | 
            +
                "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 504 | 
            +
                "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 505 | 
            +
                "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 506 | 
            +
                "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 507 | 
            +
                "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 508 | 
            +
                "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 509 | 
            +
                "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 510 | 
            +
                "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 511 | 
            +
                "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 512 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 513 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 514 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 515 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 516 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 517 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 518 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 519 | 
            +
                "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 520 | 
            +
                "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 521 | 
            +
                "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 522 | 
            +
                "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 523 | 
            +
                "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 524 | 
            +
                "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 525 | 
            +
                "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 526 | 
            +
                "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 527 | 
            +
                "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 528 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 529 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 530 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 531 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 532 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 533 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 534 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 535 | 
            +
                "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 536 | 
            +
                "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 537 | 
            +
                "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 538 | 
            +
                "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 539 | 
            +
                "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 540 | 
            +
                "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 541 | 
            +
                "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 542 | 
            +
                "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 543 | 
            +
                "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 544 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 545 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 546 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 547 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 548 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 549 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 550 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 551 | 
            +
                "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 552 | 
            +
                "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 553 | 
            +
                "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 554 | 
            +
                "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 555 | 
            +
                "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 556 | 
            +
                "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 557 | 
            +
                "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 558 | 
            +
                "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 559 | 
            +
                "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 560 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 561 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 562 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 563 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 564 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 565 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 566 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 567 | 
            +
                "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 568 | 
            +
                "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 569 | 
            +
                "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 570 | 
            +
                "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 571 | 
            +
                "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 572 | 
            +
                "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 573 | 
            +
                "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 574 | 
            +
                "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 575 | 
            +
                "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 576 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 577 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 578 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 579 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 580 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 581 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 582 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 583 | 
            +
                "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 584 | 
            +
                "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 585 | 
            +
                "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 586 | 
            +
                "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 587 | 
            +
                "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 588 | 
            +
                "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 589 | 
            +
                "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 590 | 
            +
                "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 591 | 
            +
                "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 592 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 593 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 594 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 595 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 596 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 597 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 598 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 599 | 
            +
                "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 600 | 
            +
                "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 601 | 
            +
                "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 602 | 
            +
                "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 603 | 
            +
                "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 604 | 
            +
                "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 605 | 
            +
                "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 606 | 
            +
                "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 607 | 
            +
                "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 608 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 609 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 610 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 611 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 612 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 613 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 614 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 615 | 
            +
                "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 616 | 
            +
                "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 617 | 
            +
                "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 618 | 
            +
                "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 619 | 
            +
                "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 620 | 
            +
                "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 621 | 
            +
                "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 622 | 
            +
                "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 623 | 
            +
                "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 624 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 625 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 626 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 627 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 628 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 629 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 630 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 631 | 
            +
                "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 632 | 
            +
                "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 633 | 
            +
                "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00002.safetensors",
         | 
| 634 | 
            +
                "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00002.safetensors",
         | 
| 635 | 
            +
                "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00002.safetensors",
         | 
| 636 | 
            +
                "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
         | 
| 637 | 
            +
                "vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
         | 
| 638 | 
            +
                "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
         | 
| 639 | 
            +
                "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
         | 
| 640 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 641 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 642 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 643 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 644 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 645 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 646 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
         | 
| 647 | 
            +
                "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         | 
| 648 | 
            +
                "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00002.safetensors",
         | 
| 649 | 
            +
                "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00002.safetensors",
    "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00002.safetensors"
  }
}
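Every vision-tower entry in the weight map above resolves to the first of the checkpoint's two shards. As a hedged sketch (not part of this upload), the index can be used to locate a tensor's shard and read just that tensor lazily with the safetensors library; the file names are the ones listed in this commit.

    # Sketch: resolve a parameter to its shard via model.safetensors.index.json
    # and read only that tensor with safetensors.
    import json
    from safetensors import safe_open

    with open("model.safetensors.index.json") as f:
        index = json.load(f)

    name = "vision_tower.vision_model.post_layernorm.weight"
    shard = index["weight_map"][name]   # "model-00001-of-00002.safetensors"

    with safe_open(shard, framework="pt") as st:
        tensor = st.get_tensor(name)
        print(name, tuple(tensor.shape))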
    	
        preprocessor_config.json
    ADDED
    
@@ -0,0 +1,29 @@
{
  "do_convert_rgb": null,
  "do_normalize": true,
  "do_pan_and_scan": null,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "Gemma3ImageProcessor",
  "image_seq_length": 256,
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "pan_and_scan_max_num_crops": null,
  "pan_and_scan_min_crop_size": null,
  "pan_and_scan_min_ratio_to_activate": null,
  "processor_class": "Gemma3Processor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 896,
    "width": 896
  }
}
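The values above amount to a plain resize / rescale / normalize pipeline: resize to 896x896 with bilinear resampling (resample = 2), rescale by 1/255, and normalize each channel with mean and std of 0.5. A minimal sketch of the equivalent transform, assuming a PIL image as input:

    # Sketch of the preprocessing implied by preprocessor_config.json.
    import numpy as np
    from PIL import Image

    def preprocess(img: Image.Image) -> np.ndarray:
        img = img.convert("RGB").resize((896, 896), resample=Image.BILINEAR)
        x = np.asarray(img).astype(np.float32) * 0.00392156862745098  # rescale_factor
        x = (x - 0.5) / 0.5                                           # image_mean / image_std
        return x.transpose(2, 0, 1)                                   # HWC -> CHW

These are the steps the checkpoint's Gemma3ImageProcessor is configured to apply; the sketch only makes the arithmetic explicit.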
    	
        processor_config.json
    ADDED
    
@@ -0,0 +1,4 @@
{
  "image_seq_length": 256,
  "processor_class": "Gemma3Processor"
}
    	
        rng_state_0.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
size 15984
    	
        rng_state_1.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
size 15984
    	
        rng_state_2.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
size 15984
    	
        rng_state_3.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
size 15984
    	
        rng_state_4.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
size 15984
    	
        rng_state_5.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
size 15984
    	
        rng_state_6.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
size 15984
    	
        rng_state_7.pth
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
size 15984
    	
        scheduler.pt
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:22620461b6cb574c85d9d71e1f71f033456cb3ac72b018148bfd0ab9360b0b2e
size 1064
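The per-rank RNG states and the scheduler state above are stored as Git LFS pointer files: each records only the blob's sha256 (oid) and byte size. A small, repo-agnostic sketch for checking a downloaded blob against its pointer:

    # Sketch: verify a downloaded file against the oid/size in its LFS pointer.
    import hashlib, os

    def verify_lfs(path: str, oid: str, size: int) -> bool:
        if os.path.getsize(path) != size:
            return False
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == oid

    # Values copied from the scheduler.pt pointer above:
    print(verify_lfs("scheduler.pt",
                     "22620461b6cb574c85d9d71e1f71f033456cb3ac72b018148bfd0ab9360b0b2e",
                     1064))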
    	
        special_tokens_map.json
    ADDED
    
@@ -0,0 +1,42 @@
{
  "additional_special_tokens": [
    {
      "content": "<end_of_turn>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    }
  ],
  "boi_token": "<start_of_image>",
  "bos_token": {
    "content": "<bos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eoi_token": "<end_of_image>",
  "eos_token": {
    "content": "<eos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "image_token": "<image_soft_token>",
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
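This map wires up Gemma 3's image delimiters (<start_of_image>, <end_of_image>, <image_soft_token>) alongside the usual bos/eos/pad/unk tokens, with <end_of_turn> as an additional special token. A hedged sketch of inspecting them after loading the checkpoint directory with transformers (the local path is a placeholder):

    # Sketch: load the tokenizer from this checkpoint and check its special tokens.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./checkpoint-100")
    print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
    print(tok.convert_tokens_to_ids("<image_soft_token>"))  # id of the image soft token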
    	
        tokenizer.json
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
size 33384568
    	
        tokenizer.model
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
size 4689074
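Both tokenizer files are LFS-tracked blobs; tokenizer.json backs the fast tokenizer, while tokenizer.model is assumed here to be the underlying SentencePiece model, as in other Gemma checkpoints. A minimal sketch of loading it directly, under that assumption:

    # Sketch: load tokenizer.model with sentencepiece (assumes a standard
    # SentencePiece model file).
    import sentencepiece as spm

    sp = spm.SentencePieceProcessor(model_file="tokenizer.model")
    print(sp.get_piece_size())
    print(sp.encode("hello world", out_type=str))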
    	
        tokenizer_config.json
    ADDED
    
The diff for this file is too large to render. See raw diff.
    	
        trainer_state.json
    ADDED
    
@@ -0,0 +1,741 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3367003367003367,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003367003367003367,
      "grad_norm": 190.2997283935547,
      "learning_rate": 6.711409395973154e-07,
      "loss": 13.9272,
      "step": 1
    },
    {
      "epoch": 0.006734006734006734,
      "grad_norm": 196.5933074951172,
      "learning_rate": 1.3422818791946309e-06,
      "loss": 14.3753,
      "step": 2
    },
    {
      "epoch": 0.010101010101010102,
      "grad_norm": 198.02767944335938,
      "learning_rate": 2.013422818791946e-06,
      "loss": 14.4143,
      "step": 3
    },
    {
      "epoch": 0.013468013468013467,
      "grad_norm": 186.30801391601562,
      "learning_rate": 2.6845637583892617e-06,
      "loss": 13.7729,
      "step": 4
    },
    {
      "epoch": 0.016835016835016835,
      "grad_norm": 129.32237243652344,
      "learning_rate": 3.3557046979865773e-06,
      "loss": 11.4082,
      "step": 5
    },
    {
      "epoch": 0.020202020202020204,
      "grad_norm": 123.9930191040039,
      "learning_rate": 4.026845637583892e-06,
      "loss": 11.5581,
      "step": 6
    },
    {
      "epoch": 0.02356902356902357,
      "grad_norm": 102.4565658569336,
      "learning_rate": 4.697986577181209e-06,
      "loss": 9.8311,
      "step": 7
    },
    {
      "epoch": 0.026936026936026935,
      "grad_norm": 98.7117919921875,
      "learning_rate": 5.3691275167785235e-06,
      "loss": 9.825,
      "step": 8
    },
    {
      "epoch": 0.030303030303030304,
      "grad_norm": 121.9065170288086,
      "learning_rate": 6.04026845637584e-06,
      "loss": 8.5157,
      "step": 9
    },
    {
      "epoch": 0.03367003367003367,
      "grad_norm": 93.352294921875,
      "learning_rate": 6.7114093959731546e-06,
      "loss": 7.6328,
      "step": 10
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 108.89420318603516,
      "learning_rate": 7.382550335570471e-06,
      "loss": 7.1598,
      "step": 11
    },
    {
      "epoch": 0.04040404040404041,
      "grad_norm": 191.65274047851562,
      "learning_rate": 8.053691275167785e-06,
      "loss": 6.237,
      "step": 12
    },
    {
      "epoch": 0.04377104377104377,
      "grad_norm": 150.62646484375,
      "learning_rate": 8.724832214765101e-06,
      "loss": 5.7063,
      "step": 13
    },
    {
      "epoch": 0.04713804713804714,
      "grad_norm": 185.48080444335938,
      "learning_rate": 9.395973154362418e-06,
      "loss": 5.093,
      "step": 14
    },
    {
      "epoch": 0.050505050505050504,
      "grad_norm": 1576.556640625,
      "learning_rate": 1.006711409395973e-05,
      "loss": 8.3575,
      "step": 15
    },
    {
      "epoch": 0.05387205387205387,
      "grad_norm": 441.4505310058594,
      "learning_rate": 1.0738255033557047e-05,
      "loss": 4.679,
      "step": 16
    },
    {
      "epoch": 0.05723905723905724,
      "grad_norm": 499.8016357421875,
      "learning_rate": 1.1409395973154363e-05,
      "loss": 3.1432,
      "step": 17
    },
    {
      "epoch": 0.06060606060606061,
      "grad_norm": 472.59747314453125,
      "learning_rate": 1.208053691275168e-05,
      "loss": 2.9237,
      "step": 18
    },
    {
      "epoch": 0.06397306397306397,
      "grad_norm": 506.6687927246094,
      "learning_rate": 1.2751677852348994e-05,
      "loss": 2.6882,
      "step": 19
    },
    {
      "epoch": 0.06734006734006734,
      "grad_norm": 494.16949462890625,
      "learning_rate": 1.3422818791946309e-05,
      "loss": 2.4807,
      "step": 20
    },
    {
      "epoch": 0.0707070707070707,
      "grad_norm": 463.3478698730469,
      "learning_rate": 1.4093959731543624e-05,
      "loss": 2.2508,
      "step": 21
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 422.92401123046875,
      "learning_rate": 1.4765100671140942e-05,
      "loss": 1.9202,
      "step": 22
    },
    {
      "epoch": 0.07744107744107744,
      "grad_norm": 417.1321105957031,
      "learning_rate": 1.5436241610738255e-05,
      "loss": 1.6106,
         | 
| 170 | 
            +
                  "step": 23
         | 
| 171 | 
            +
                },
         | 
| 172 | 
            +
                {
         | 
| 173 | 
            +
                  "epoch": 0.08080808080808081,
         | 
| 174 | 
            +
                  "grad_norm": 360.2781677246094,
         | 
| 175 | 
            +
                  "learning_rate": 1.610738255033557e-05,
         | 
| 176 | 
            +
                  "loss": 1.2741,
         | 
| 177 | 
            +
                  "step": 24
         | 
| 178 | 
            +
                },
         | 
| 179 | 
            +
                {
         | 
| 180 | 
            +
                  "epoch": 0.08417508417508418,
         | 
| 181 | 
            +
                  "grad_norm": 297.3291015625,
         | 
| 182 | 
            +
                  "learning_rate": 1.6778523489932888e-05,
         | 
| 183 | 
            +
                  "loss": 1.0282,
         | 
| 184 | 
            +
                  "step": 25
         | 
| 185 | 
            +
                },
         | 
| 186 | 
            +
                {
         | 
| 187 | 
            +
                  "epoch": 0.08754208754208755,
         | 
| 188 | 
            +
                  "grad_norm": 195.75958251953125,
         | 
| 189 | 
            +
                  "learning_rate": 1.7449664429530202e-05,
         | 
| 190 | 
            +
                  "loss": 0.799,
         | 
| 191 | 
            +
                  "step": 26
         | 
| 192 | 
            +
                },
         | 
| 193 | 
            +
                {
         | 
| 194 | 
            +
                  "epoch": 0.09090909090909091,
         | 
| 195 | 
            +
                  "grad_norm": 116.36829376220703,
         | 
| 196 | 
            +
                  "learning_rate": 1.8120805369127517e-05,
         | 
| 197 | 
            +
                  "loss": 0.6593,
         | 
| 198 | 
            +
                  "step": 27
         | 
| 199 | 
            +
                },
         | 
| 200 | 
            +
                {
         | 
| 201 | 
            +
                  "epoch": 0.09427609427609428,
         | 
| 202 | 
            +
                  "grad_norm": 70.56578063964844,
         | 
| 203 | 
            +
                  "learning_rate": 1.8791946308724835e-05,
         | 
| 204 | 
            +
                  "loss": 0.5787,
         | 
| 205 | 
            +
                  "step": 28
         | 
| 206 | 
            +
                },
         | 
| 207 | 
            +
                {
         | 
| 208 | 
            +
                  "epoch": 0.09764309764309764,
         | 
| 209 | 
            +
                  "grad_norm": 45.22296905517578,
         | 
| 210 | 
            +
                  "learning_rate": 1.946308724832215e-05,
         | 
| 211 | 
            +
                  "loss": 0.5196,
         | 
| 212 | 
            +
                  "step": 29
         | 
| 213 | 
            +
                },
         | 
| 214 | 
            +
                {
         | 
| 215 | 
            +
                  "epoch": 0.10101010101010101,
         | 
| 216 | 
            +
                  "grad_norm": 20.37734603881836,
         | 
| 217 | 
            +
                  "learning_rate": 2.013422818791946e-05,
         | 
| 218 | 
            +
                  "loss": 0.4681,
         | 
| 219 | 
            +
                  "step": 30
         | 
| 220 | 
            +
                },
         | 
| 221 | 
            +
                {
         | 
| 222 | 
            +
                  "epoch": 0.10437710437710437,
         | 
| 223 | 
            +
                  "grad_norm": 7.735367298126221,
         | 
| 224 | 
            +
                  "learning_rate": 2.080536912751678e-05,
         | 
| 225 | 
            +
                  "loss": 0.4318,
         | 
| 226 | 
            +
                  "step": 31
         | 
| 227 | 
            +
                },
         | 
| 228 | 
            +
                {
         | 
| 229 | 
            +
                  "epoch": 0.10774410774410774,
         | 
| 230 | 
            +
                  "grad_norm": 4.360243797302246,
         | 
| 231 | 
            +
                  "learning_rate": 2.1476510067114094e-05,
         | 
| 232 | 
            +
                  "loss": 0.4276,
         | 
| 233 | 
            +
                  "step": 32
         | 
| 234 | 
            +
                },
         | 
| 235 | 
            +
                {
         | 
| 236 | 
            +
                  "epoch": 0.1111111111111111,
         | 
| 237 | 
            +
                  "grad_norm": 4.440345287322998,
         | 
| 238 | 
            +
                  "learning_rate": 2.2147651006711412e-05,
         | 
| 239 | 
            +
                  "loss": 0.4463,
         | 
| 240 | 
            +
                  "step": 33
         | 
| 241 | 
            +
                },
         | 
| 242 | 
            +
                {
         | 
| 243 | 
            +
                  "epoch": 0.11447811447811448,
         | 
| 244 | 
            +
                  "grad_norm": 26.992700576782227,
         | 
| 245 | 
            +
                  "learning_rate": 2.2818791946308727e-05,
         | 
| 246 | 
            +
                  "loss": 0.4394,
         | 
| 247 | 
            +
                  "step": 34
         | 
| 248 | 
            +
                },
         | 
| 249 | 
            +
                {
         | 
| 250 | 
            +
                  "epoch": 0.11784511784511785,
         | 
| 251 | 
            +
                  "grad_norm": 33.81399917602539,
         | 
| 252 | 
            +
                  "learning_rate": 2.348993288590604e-05,
         | 
| 253 | 
            +
                  "loss": 0.6005,
         | 
| 254 | 
            +
                  "step": 35
         | 
| 255 | 
            +
                },
         | 
| 256 | 
            +
                {
         | 
| 257 | 
            +
                  "epoch": 0.12121212121212122,
         | 
| 258 | 
            +
                  "grad_norm": 7.8905029296875,
         | 
| 259 | 
            +
                  "learning_rate": 2.416107382550336e-05,
         | 
| 260 | 
            +
                  "loss": 0.4963,
         | 
| 261 | 
            +
                  "step": 36
         | 
| 262 | 
            +
                },
         | 
| 263 | 
            +
                {
         | 
| 264 | 
            +
                  "epoch": 0.12457912457912458,
         | 
| 265 | 
            +
                  "grad_norm": 2.6311209201812744,
         | 
| 266 | 
            +
                  "learning_rate": 2.4832214765100674e-05,
         | 
| 267 | 
            +
                  "loss": 0.39,
         | 
| 268 | 
            +
                  "step": 37
         | 
| 269 | 
            +
                },
         | 
| 270 | 
            +
                {
         | 
| 271 | 
            +
                  "epoch": 0.12794612794612795,
         | 
| 272 | 
            +
                  "grad_norm": 2.389883041381836,
         | 
| 273 | 
            +
                  "learning_rate": 2.550335570469799e-05,
         | 
| 274 | 
            +
                  "loss": 0.3782,
         | 
| 275 | 
            +
                  "step": 38
         | 
| 276 | 
            +
                },
         | 
| 277 | 
            +
                {
         | 
| 278 | 
            +
                  "epoch": 0.13131313131313133,
         | 
| 279 | 
            +
                  "grad_norm": 2.070525646209717,
         | 
| 280 | 
            +
                  "learning_rate": 2.6174496644295304e-05,
         | 
| 281 | 
            +
                  "loss": 0.3592,
         | 
| 282 | 
            +
                  "step": 39
         | 
| 283 | 
            +
                },
         | 
| 284 | 
            +
                {
         | 
| 285 | 
            +
                  "epoch": 0.13468013468013468,
         | 
| 286 | 
            +
                  "grad_norm": 5.955089569091797,
         | 
| 287 | 
            +
                  "learning_rate": 2.6845637583892618e-05,
         | 
| 288 | 
            +
                  "loss": 0.3777,
         | 
| 289 | 
            +
                  "step": 40
         | 
| 290 | 
            +
                },
         | 
| 291 | 
            +
                {
         | 
| 292 | 
            +
                  "epoch": 0.13804713804713806,
         | 
| 293 | 
            +
                  "grad_norm": 6.50673770904541,
         | 
| 294 | 
            +
                  "learning_rate": 2.7516778523489933e-05,
         | 
| 295 | 
            +
                  "loss": 0.389,
         | 
| 296 | 
            +
                  "step": 41
         | 
| 297 | 
            +
                },
         | 
| 298 | 
            +
                {
         | 
| 299 | 
            +
                  "epoch": 0.1414141414141414,
         | 
| 300 | 
            +
                  "grad_norm": 2.0794308185577393,
         | 
| 301 | 
            +
                  "learning_rate": 2.8187919463087248e-05,
         | 
| 302 | 
            +
                  "loss": 0.3618,
         | 
| 303 | 
            +
                  "step": 42
         | 
| 304 | 
            +
                },
         | 
| 305 | 
            +
                {
         | 
| 306 | 
            +
                  "epoch": 0.1447811447811448,
         | 
| 307 | 
            +
                  "grad_norm": 1.5477614402770996,
         | 
| 308 | 
            +
                  "learning_rate": 2.885906040268457e-05,
         | 
| 309 | 
            +
                  "loss": 0.3593,
         | 
| 310 | 
            +
                  "step": 43
         | 
| 311 | 
            +
                },
         | 
| 312 | 
            +
                {
         | 
| 313 | 
            +
                  "epoch": 0.14814814814814814,
         | 
| 314 | 
            +
                  "grad_norm": 10.740438461303711,
         | 
| 315 | 
            +
                  "learning_rate": 2.9530201342281884e-05,
         | 
| 316 | 
            +
                  "loss": 0.3805,
         | 
| 317 | 
            +
                  "step": 44
         | 
| 318 | 
            +
                },
         | 
| 319 | 
            +
                {
         | 
| 320 | 
            +
                  "epoch": 0.15151515151515152,
         | 
| 321 | 
            +
                  "grad_norm": 2.993213176727295,
         | 
| 322 | 
            +
                  "learning_rate": 3.02013422818792e-05,
         | 
| 323 | 
            +
                  "loss": 0.3673,
         | 
| 324 | 
            +
                  "step": 45
         | 
| 325 | 
            +
                },
         | 
| 326 | 
            +
                {
         | 
| 327 | 
            +
                  "epoch": 0.15488215488215487,
         | 
| 328 | 
            +
                  "grad_norm": 17.512208938598633,
         | 
| 329 | 
            +
                  "learning_rate": 3.087248322147651e-05,
         | 
| 330 | 
            +
                  "loss": 0.3922,
         | 
| 331 | 
            +
                  "step": 46
         | 
| 332 | 
            +
                },
         | 
| 333 | 
            +
                {
         | 
| 334 | 
            +
                  "epoch": 0.15824915824915825,
         | 
| 335 | 
            +
                  "grad_norm": 2.5222012996673584,
         | 
| 336 | 
            +
                  "learning_rate": 3.1543624161073825e-05,
         | 
| 337 | 
            +
                  "loss": 0.3873,
         | 
| 338 | 
            +
                  "step": 47
         | 
| 339 | 
            +
                },
         | 
| 340 | 
            +
                {
         | 
| 341 | 
            +
                  "epoch": 0.16161616161616163,
         | 
| 342 | 
            +
                  "grad_norm": 0.8730729222297668,
         | 
| 343 | 
            +
                  "learning_rate": 3.221476510067114e-05,
         | 
| 344 | 
            +
                  "loss": 0.3593,
         | 
| 345 | 
            +
                  "step": 48
         | 
| 346 | 
            +
                },
         | 
| 347 | 
            +
                {
         | 
| 348 | 
            +
                  "epoch": 0.16498316498316498,
         | 
| 349 | 
            +
                  "grad_norm": 0.8050268292427063,
         | 
| 350 | 
            +
                  "learning_rate": 3.288590604026846e-05,
         | 
| 351 | 
            +
                  "loss": 0.3491,
         | 
| 352 | 
            +
                  "step": 49
         | 
| 353 | 
            +
                },
         | 
| 354 | 
            +
                {
         | 
| 355 | 
            +
                  "epoch": 0.16835016835016836,
         | 
| 356 | 
            +
                  "grad_norm": 0.7536938190460205,
         | 
| 357 | 
            +
                  "learning_rate": 3.3557046979865775e-05,
         | 
| 358 | 
            +
                  "loss": 0.3469,
         | 
| 359 | 
            +
                  "step": 50
         | 
| 360 | 
            +
                },
         | 
| 361 | 
            +
                {
         | 
| 362 | 
            +
                  "epoch": 0.1717171717171717,
         | 
| 363 | 
            +
                  "grad_norm": 0.9090268015861511,
         | 
| 364 | 
            +
                  "learning_rate": 3.422818791946309e-05,
         | 
| 365 | 
            +
                  "loss": 0.3663,
         | 
| 366 | 
            +
                  "step": 51
         | 
| 367 | 
            +
                },
         | 
| 368 | 
            +
                {
         | 
| 369 | 
            +
                  "epoch": 0.1750841750841751,
         | 
| 370 | 
            +
                  "grad_norm": 0.8775368928909302,
         | 
| 371 | 
            +
                  "learning_rate": 3.4899328859060405e-05,
         | 
| 372 | 
            +
                  "loss": 0.3489,
         | 
| 373 | 
            +
                  "step": 52
         | 
| 374 | 
            +
                },
         | 
| 375 | 
            +
                {
         | 
| 376 | 
            +
                  "epoch": 0.17845117845117844,
         | 
| 377 | 
            +
                  "grad_norm": 0.5326427221298218,
         | 
| 378 | 
            +
                  "learning_rate": 3.557046979865772e-05,
         | 
| 379 | 
            +
                  "loss": 0.3466,
         | 
| 380 | 
            +
                  "step": 53
         | 
| 381 | 
            +
                },
         | 
| 382 | 
            +
                {
         | 
| 383 | 
            +
                  "epoch": 0.18181818181818182,
         | 
| 384 | 
            +
                  "grad_norm": 0.561137318611145,
         | 
| 385 | 
            +
                  "learning_rate": 3.6241610738255034e-05,
         | 
| 386 | 
            +
                  "loss": 0.3393,
         | 
| 387 | 
            +
                  "step": 54
         | 
| 388 | 
            +
                },
         | 
| 389 | 
            +
                {
         | 
| 390 | 
            +
                  "epoch": 0.18518518518518517,
         | 
| 391 | 
            +
                  "grad_norm": 0.8053128123283386,
         | 
| 392 | 
            +
                  "learning_rate": 3.6912751677852356e-05,
         | 
| 393 | 
            +
                  "loss": 0.352,
         | 
| 394 | 
            +
                  "step": 55
         | 
| 395 | 
            +
                },
         | 
| 396 | 
            +
                {
         | 
| 397 | 
            +
                  "epoch": 0.18855218855218855,
         | 
| 398 | 
            +
                  "grad_norm": 0.5964087843894958,
         | 
| 399 | 
            +
                  "learning_rate": 3.758389261744967e-05,
         | 
| 400 | 
            +
                  "loss": 0.3507,
         | 
| 401 | 
            +
                  "step": 56
         | 
| 402 | 
            +
                },
         | 
| 403 | 
            +
                {
         | 
| 404 | 
            +
                  "epoch": 0.1919191919191919,
         | 
| 405 | 
            +
                  "grad_norm": 0.5998376607894897,
         | 
| 406 | 
            +
                  "learning_rate": 3.8255033557046985e-05,
         | 
| 407 | 
            +
                  "loss": 0.3504,
         | 
| 408 | 
            +
                  "step": 57
         | 
| 409 | 
            +
                },
         | 
| 410 | 
            +
                {
         | 
| 411 | 
            +
                  "epoch": 0.19528619528619529,
         | 
| 412 | 
            +
                  "grad_norm": 1.2634875774383545,
         | 
| 413 | 
            +
                  "learning_rate": 3.89261744966443e-05,
         | 
| 414 | 
            +
                  "loss": 0.337,
         | 
| 415 | 
            +
                  "step": 58
         | 
| 416 | 
            +
                },
         | 
| 417 | 
            +
                {
         | 
| 418 | 
            +
                  "epoch": 0.19865319865319866,
         | 
| 419 | 
            +
                  "grad_norm": 0.5703901648521423,
         | 
| 420 | 
            +
                  "learning_rate": 3.959731543624161e-05,
         | 
| 421 | 
            +
                  "loss": 0.3408,
         | 
| 422 | 
            +
                  "step": 59
         | 
| 423 | 
            +
                },
         | 
| 424 | 
            +
                {
         | 
| 425 | 
            +
                  "epoch": 0.20202020202020202,
         | 
| 426 | 
            +
                  "grad_norm": 0.7656762003898621,
         | 
| 427 | 
            +
                  "learning_rate": 4.026845637583892e-05,
         | 
| 428 | 
            +
                  "loss": 0.3206,
         | 
| 429 | 
            +
                  "step": 60
         | 
| 430 | 
            +
                },
         | 
| 431 | 
            +
                {
         | 
| 432 | 
            +
                  "epoch": 0.2053872053872054,
         | 
| 433 | 
            +
                  "grad_norm": 0.6210582852363586,
         | 
| 434 | 
            +
                  "learning_rate": 4.0939597315436244e-05,
         | 
| 435 | 
            +
                  "loss": 0.354,
         | 
| 436 | 
            +
                  "step": 61
         | 
| 437 | 
            +
                },
         | 
| 438 | 
            +
                {
         | 
| 439 | 
            +
                  "epoch": 0.20875420875420875,
         | 
| 440 | 
            +
                  "grad_norm": 0.6622840166091919,
         | 
| 441 | 
            +
                  "learning_rate": 4.161073825503356e-05,
         | 
| 442 | 
            +
                  "loss": 0.3439,
         | 
| 443 | 
            +
                  "step": 62
         | 
| 444 | 
            +
                },
         | 
| 445 | 
            +
                {
         | 
| 446 | 
            +
                  "epoch": 0.21212121212121213,
         | 
| 447 | 
            +
                  "grad_norm": 0.46426376700401306,
         | 
| 448 | 
            +
                  "learning_rate": 4.228187919463087e-05,
         | 
| 449 | 
            +
                  "loss": 0.3434,
         | 
| 450 | 
            +
                  "step": 63
         | 
| 451 | 
            +
                },
         | 
| 452 | 
            +
                {
         | 
| 453 | 
            +
                  "epoch": 0.21548821548821548,
         | 
| 454 | 
            +
                  "grad_norm": 0.38662126660346985,
         | 
| 455 | 
            +
                  "learning_rate": 4.295302013422819e-05,
         | 
| 456 | 
            +
                  "loss": 0.3362,
         | 
| 457 | 
            +
                  "step": 64
         | 
| 458 | 
            +
                },
         | 
| 459 | 
            +
                {
         | 
| 460 | 
            +
                  "epoch": 0.21885521885521886,
         | 
| 461 | 
            +
                  "grad_norm": 0.5812459588050842,
         | 
| 462 | 
            +
                  "learning_rate": 4.36241610738255e-05,
         | 
| 463 | 
            +
                  "loss": 0.323,
         | 
| 464 | 
            +
                  "step": 65
         | 
| 465 | 
            +
                },
         | 
| 466 | 
            +
                {
         | 
| 467 | 
            +
                  "epoch": 0.2222222222222222,
         | 
| 468 | 
            +
                  "grad_norm": 0.626932680606842,
         | 
| 469 | 
            +
                  "learning_rate": 4.4295302013422824e-05,
         | 
| 470 | 
            +
                  "loss": 0.3427,
         | 
| 471 | 
            +
                  "step": 66
         | 
| 472 | 
            +
                },
         | 
| 473 | 
            +
                {
         | 
| 474 | 
            +
                  "epoch": 0.2255892255892256,
         | 
| 475 | 
            +
                  "grad_norm": 0.5491658449172974,
         | 
| 476 | 
            +
                  "learning_rate": 4.496644295302014e-05,
         | 
| 477 | 
            +
                  "loss": 0.3406,
         | 
| 478 | 
            +
                  "step": 67
         | 
| 479 | 
            +
                },
         | 
| 480 | 
            +
                {
         | 
| 481 | 
            +
                  "epoch": 0.22895622895622897,
         | 
| 482 | 
            +
                  "grad_norm": 0.4023520052433014,
         | 
| 483 | 
            +
                  "learning_rate": 4.5637583892617453e-05,
         | 
| 484 | 
            +
                  "loss": 0.3328,
         | 
| 485 | 
            +
                  "step": 68
         | 
| 486 | 
            +
                },
         | 
| 487 | 
            +
                {
         | 
| 488 | 
            +
                  "epoch": 0.23232323232323232,
         | 
| 489 | 
            +
                  "grad_norm": 0.478535532951355,
         | 
| 490 | 
            +
                  "learning_rate": 4.630872483221477e-05,
         | 
| 491 | 
            +
                  "loss": 0.3402,
         | 
| 492 | 
            +
                  "step": 69
         | 
| 493 | 
            +
                },
         | 
| 494 | 
            +
                {
         | 
| 495 | 
            +
                  "epoch": 0.2356902356902357,
         | 
| 496 | 
            +
                  "grad_norm": 0.44869011640548706,
         | 
| 497 | 
            +
                  "learning_rate": 4.697986577181208e-05,
         | 
| 498 | 
            +
                  "loss": 0.3516,
         | 
| 499 | 
            +
                  "step": 70
         | 
| 500 | 
            +
                },
         | 
| 501 | 
            +
                {
         | 
| 502 | 
            +
                  "epoch": 0.23905723905723905,
         | 
| 503 | 
            +
                  "grad_norm": 0.4810108244419098,
         | 
| 504 | 
            +
                  "learning_rate": 4.76510067114094e-05,
         | 
| 505 | 
            +
                  "loss": 0.3411,
         | 
| 506 | 
            +
                  "step": 71
         | 
| 507 | 
            +
                },
         | 
| 508 | 
            +
                {
         | 
| 509 | 
            +
                  "epoch": 0.24242424242424243,
         | 
| 510 | 
            +
                  "grad_norm": 0.3956281542778015,
         | 
| 511 | 
            +
                  "learning_rate": 4.832214765100672e-05,
         | 
| 512 | 
            +
                  "loss": 0.3395,
         | 
| 513 | 
            +
                  "step": 72
         | 
| 514 | 
            +
                },
         | 
| 515 | 
            +
                {
         | 
| 516 | 
            +
                  "epoch": 0.24579124579124578,
         | 
| 517 | 
            +
                  "grad_norm": 0.40301939845085144,
         | 
| 518 | 
            +
                  "learning_rate": 4.8993288590604034e-05,
         | 
| 519 | 
            +
                  "loss": 0.3217,
         | 
| 520 | 
            +
                  "step": 73
         | 
| 521 | 
            +
                },
         | 
| 522 | 
            +
                {
         | 
| 523 | 
            +
                  "epoch": 0.24915824915824916,
         | 
| 524 | 
            +
                  "grad_norm": 0.44550034403800964,
         | 
| 525 | 
            +
                  "learning_rate": 4.966442953020135e-05,
         | 
| 526 | 
            +
                  "loss": 0.3257,
         | 
| 527 | 
            +
                  "step": 74
         | 
| 528 | 
            +
                },
         | 
| 529 | 
            +
                {
         | 
| 530 | 
            +
                  "epoch": 0.25252525252525254,
         | 
| 531 | 
            +
                  "grad_norm": 0.5890341997146606,
         | 
| 532 | 
            +
                  "learning_rate": 5.033557046979866e-05,
         | 
| 533 | 
            +
                  "loss": 0.3335,
         | 
| 534 | 
            +
                  "step": 75
         | 
| 535 | 
            +
                },
         | 
| 536 | 
            +
                {
         | 
| 537 | 
            +
                  "epoch": 0.2558922558922559,
         | 
| 538 | 
            +
                  "grad_norm": 0.8096022009849548,
         | 
| 539 | 
            +
                  "learning_rate": 5.100671140939598e-05,
         | 
| 540 | 
            +
                  "loss": 0.3421,
         | 
| 541 | 
            +
                  "step": 76
         | 
| 542 | 
            +
                },
         | 
| 543 | 
            +
                {
         | 
| 544 | 
            +
                  "epoch": 0.25925925925925924,
         | 
| 545 | 
            +
                  "grad_norm": 0.6044747829437256,
         | 
| 546 | 
            +
                  "learning_rate": 5.167785234899329e-05,
         | 
| 547 | 
            +
                  "loss": 0.3266,
         | 
| 548 | 
            +
                  "step": 77
         | 
| 549 | 
            +
                },
         | 
| 550 | 
            +
                {
         | 
| 551 | 
            +
                  "epoch": 0.26262626262626265,
         | 
| 552 | 
            +
                  "grad_norm": 0.5191451907157898,
         | 
| 553 | 
            +
                  "learning_rate": 5.234899328859061e-05,
         | 
| 554 | 
            +
                  "loss": 0.331,
         | 
| 555 | 
            +
                  "step": 78
         | 
| 556 | 
            +
                },
         | 
| 557 | 
            +
                {
         | 
| 558 | 
            +
                  "epoch": 0.265993265993266,
         | 
| 559 | 
            +
                  "grad_norm": 1.0799261331558228,
         | 
| 560 | 
            +
                  "learning_rate": 5.302013422818792e-05,
         | 
| 561 | 
            +
                  "loss": 0.3243,
         | 
| 562 | 
            +
                  "step": 79
         | 
| 563 | 
            +
                },
         | 
| 564 | 
            +
                {
         | 
| 565 | 
            +
                  "epoch": 0.26936026936026936,
         | 
| 566 | 
            +
                  "grad_norm": 5.513405799865723,
         | 
| 567 | 
            +
                  "learning_rate": 5.3691275167785237e-05,
         | 
| 568 | 
            +
                  "loss": 0.379,
         | 
| 569 | 
            +
                  "step": 80
         | 
| 570 | 
            +
                },
         | 
| 571 | 
            +
                {
         | 
| 572 | 
            +
                  "epoch": 0.2727272727272727,
         | 
| 573 | 
            +
                  "grad_norm": 0.673650860786438,
         | 
| 574 | 
            +
                  "learning_rate": 5.436241610738255e-05,
         | 
| 575 | 
            +
                  "loss": 0.3482,
         | 
| 576 | 
            +
                  "step": 81
         | 
| 577 | 
            +
                },
         | 
| 578 | 
            +
                {
         | 
| 579 | 
            +
                  "epoch": 0.2760942760942761,
         | 
| 580 | 
            +
                  "grad_norm": 1.1485897302627563,
         | 
| 581 | 
            +
                  "learning_rate": 5.5033557046979866e-05,
         | 
| 582 | 
            +
                  "loss": 0.3351,
         | 
| 583 | 
            +
                  "step": 82
         | 
| 584 | 
            +
                },
         | 
| 585 | 
            +
                {
         | 
| 586 | 
            +
                  "epoch": 0.27946127946127947,
         | 
| 587 | 
            +
                  "grad_norm": 0.5018780827522278,
         | 
| 588 | 
            +
                  "learning_rate": 5.570469798657718e-05,
         | 
| 589 | 
            +
                  "loss": 0.3077,
         | 
| 590 | 
            +
                  "step": 83
         | 
| 591 | 
            +
                },
         | 
| 592 | 
            +
                {
         | 
| 593 | 
            +
                  "epoch": 0.2828282828282828,
         | 
| 594 | 
            +
                  "grad_norm": 4.367802619934082,
         | 
| 595 | 
            +
                  "learning_rate": 5.6375838926174495e-05,
         | 
| 596 | 
            +
                  "loss": 0.3284,
         | 
| 597 | 
            +
                  "step": 84
         | 
| 598 | 
            +
                },
         | 
| 599 | 
            +
                {
         | 
| 600 | 
            +
                  "epoch": 0.28619528619528617,
         | 
| 601 | 
            +
                  "grad_norm": 33.46516036987305,
         | 
| 602 | 
            +
                  "learning_rate": 5.704697986577181e-05,
         | 
| 603 | 
            +
                  "loss": 1.0651,
         | 
| 604 | 
            +
                  "step": 85
         | 
| 605 | 
            +
                },
         | 
| 606 | 
            +
                {
         | 
| 607 | 
            +
                  "epoch": 0.2895622895622896,
         | 
| 608 | 
            +
                  "grad_norm": 91.36512756347656,
         | 
| 609 | 
            +
                  "learning_rate": 5.771812080536914e-05,
         | 
| 610 | 
            +
                  "loss": 1.7174,
         | 
| 611 | 
            +
                  "step": 86
         | 
| 612 | 
            +
                },
         | 
| 613 | 
            +
                {
         | 
| 614 | 
            +
                  "epoch": 0.29292929292929293,
         | 
| 615 | 
            +
                  "grad_norm": 9.666085243225098,
         | 
| 616 | 
            +
                  "learning_rate": 5.838926174496645e-05,
         | 
| 617 | 
            +
                  "loss": 0.5601,
         | 
| 618 | 
            +
                  "step": 87
         | 
| 619 | 
            +
                },
         | 
| 620 | 
            +
                {
         | 
| 621 | 
            +
                  "epoch": 0.2962962962962963,
         | 
| 622 | 
            +
                  "grad_norm": 8.608613967895508,
         | 
| 623 | 
            +
                  "learning_rate": 5.906040268456377e-05,
         | 
| 624 | 
            +
                  "loss": 0.3865,
         | 
| 625 | 
            +
                  "step": 88
         | 
| 626 | 
            +
                },
         | 
| 627 | 
            +
                {
         | 
| 628 | 
            +
                  "epoch": 0.2996632996632997,
         | 
| 629 | 
            +
                  "grad_norm": 3.025059223175049,
         | 
| 630 | 
            +
                  "learning_rate": 5.973154362416108e-05,
         | 
| 631 | 
            +
                  "loss": 0.358,
         | 
| 632 | 
            +
                  "step": 89
         | 
| 633 | 
            +
                },
         | 
| 634 | 
            +
                {
         | 
| 635 | 
            +
                  "epoch": 0.30303030303030304,
         | 
| 636 | 
            +
                  "grad_norm": 9.862916946411133,
         | 
| 637 | 
            +
                  "learning_rate": 6.04026845637584e-05,
         | 
| 638 | 
            +
                  "loss": 0.4464,
         | 
| 639 | 
            +
                  "step": 90
         | 
| 640 | 
            +
                },
         | 
| 641 | 
            +
                {
         | 
| 642 | 
            +
                  "epoch": 0.3063973063973064,
         | 
| 643 | 
            +
                  "grad_norm": 11.05635929107666,
         | 
| 644 | 
            +
                  "learning_rate": 6.107382550335571e-05,
         | 
| 645 | 
            +
                  "loss": 0.3977,
         | 
| 646 | 
            +
                  "step": 91
         | 
| 647 | 
            +
                },
         | 
| 648 | 
            +
                {
         | 
| 649 | 
            +
                  "epoch": 0.30976430976430974,
         | 
| 650 | 
            +
                  "grad_norm": 1.0226973295211792,
         | 
| 651 | 
            +
                  "learning_rate": 6.174496644295302e-05,
         | 
| 652 | 
            +
                  "loss": 0.3206,
         | 
| 653 | 
            +
                  "step": 92
         | 
| 654 | 
            +
                },
         | 
| 655 | 
            +
                {
         | 
| 656 | 
            +
                  "epoch": 0.31313131313131315,
         | 
| 657 | 
            +
                  "grad_norm": 1.007895827293396,
         | 
| 658 | 
            +
                  "learning_rate": 6.241610738255034e-05,
         | 
| 659 | 
            +
                  "loss": 0.3355,
         | 
| 660 | 
            +
                  "step": 93
         | 
| 661 | 
            +
                },
         | 
| 662 | 
            +
                {
         | 
| 663 | 
            +
                  "epoch": 0.3164983164983165,
         | 
| 664 | 
            +
                  "grad_norm": 1.5956454277038574,
         | 
| 665 | 
            +
                  "learning_rate": 6.308724832214765e-05,
         | 
| 666 | 
            +
                  "loss": 0.3408,
         | 
| 667 | 
            +
                  "step": 94
         | 
| 668 | 
            +
                },
         | 
| 669 | 
            +
                {
         | 
| 670 | 
            +
                  "epoch": 0.31986531986531985,
         | 
| 671 | 
            +
                  "grad_norm": 21.75948715209961,
         | 
| 672 | 
            +
                  "learning_rate": 6.375838926174497e-05,
         | 
| 673 | 
            +
                  "loss": 0.4627,
         | 
| 674 | 
            +
                  "step": 95
         | 
| 675 | 
            +
                },
         | 
| 676 | 
            +
                {
         | 
| 677 | 
            +
                  "epoch": 0.32323232323232326,
         | 
| 678 | 
            +
                  "grad_norm": 5.754608154296875,
         | 
| 679 | 
            +
                  "learning_rate": 6.442953020134228e-05,
         | 
| 680 | 
            +
                  "loss": 0.3818,
         | 
| 681 | 
            +
                  "step": 96
         | 
| 682 | 
            +
                },
         | 
| 683 | 
            +
                {
         | 
| 684 | 
            +
                  "epoch": 0.3265993265993266,
         | 
| 685 | 
            +
                  "grad_norm": 3.1888318061828613,
         | 
| 686 | 
            +
                  "learning_rate": 6.51006711409396e-05,
         | 
| 687 | 
            +
                  "loss": 0.3713,
         | 
| 688 | 
            +
                  "step": 97
         | 
| 689 | 
            +
                },
         | 
| 690 | 
            +
                {
         | 
| 691 | 
            +
                  "epoch": 0.32996632996632996,
         | 
| 692 | 
            +
                  "grad_norm": 4.586446762084961,
         | 
| 693 | 
            +
                  "learning_rate": 6.577181208053692e-05,
         | 
| 694 | 
            +
                  "loss": 0.3394,
         | 
| 695 | 
            +
                  "step": 98
         | 
| 696 | 
            +
                },
         | 
| 697 | 
            +
                {
         | 
| 698 | 
            +
                  "epoch": 0.3333333333333333,
         | 
| 699 | 
            +
                  "grad_norm": 0.9332061409950256,
         | 
| 700 | 
            +
                  "learning_rate": 6.644295302013423e-05,
         | 
| 701 | 
            +
                  "loss": 0.3267,
         | 
| 702 | 
            +
                  "step": 99
         | 
| 703 | 
            +
                },
         | 
| 704 | 
            +
                {
         | 
| 705 | 
            +
                  "epoch": 0.3367003367003367,
         | 
| 706 | 
            +
                  "grad_norm": 4.119638442993164,
         | 
| 707 | 
            +
                  "learning_rate": 6.711409395973155e-05,
         | 
| 708 | 
            +
                  "loss": 0.3825,
         | 
| 709 | 
            +
                  "step": 100
         | 
| 710 | 
            +
                },
         | 
| 711 | 
            +
                {
         | 
| 712 | 
            +
                  "epoch": 0.3367003367003367,
         | 
| 713 | 
            +
                  "eval_loss": 0.16432031989097595,
         | 
| 714 | 
            +
                  "eval_runtime": 33.0116,
         | 
| 715 | 
            +
                  "eval_samples_per_second": 30.292,
         | 
| 716 | 
            +
                  "eval_steps_per_second": 1.908,
         | 
| 717 | 
            +
                  "step": 100
         | 
| 718 | 
            +
                }
         | 
| 719 | 
            +
              ],
         | 
| 720 | 
            +
              "logging_steps": 1,
         | 
| 721 | 
            +
              "max_steps": 1485,
         | 
| 722 | 
            +
              "num_input_tokens_seen": 0,
         | 
| 723 | 
            +
              "num_train_epochs": 5,
         | 
| 724 | 
            +
              "save_steps": 100,
         | 
| 725 | 
            +
              "stateful_callbacks": {
         | 
| 726 | 
            +
                "TrainerControl": {
         | 
| 727 | 
            +
                  "args": {
         | 
| 728 | 
            +
                    "should_epoch_stop": false,
         | 
| 729 | 
            +
                    "should_evaluate": false,
         | 
| 730 | 
            +
                    "should_log": false,
         | 
| 731 | 
            +
                    "should_save": true,
         | 
| 732 | 
            +
                    "should_training_stop": false
         | 
| 733 | 
            +
                  },
         | 
| 734 | 
            +
                  "attributes": {}
         | 
| 735 | 
            +
                }
         | 
| 736 | 
            +
              },
         | 
| 737 | 
            +
              "total_flos": 1.3367497566715904e+17,
         | 
| 738 | 
            +
              "train_batch_size": 4,
         | 
| 739 | 
            +
              "trial_name": null,
         | 
| 740 | 
            +
              "trial_params": null
         | 
| 741 | 
            +
            }
         | 
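For a quick look at the loss curve recorded in the log history above, the state file can be read directly with the standard library. A minimal sketch, assuming the usual Hugging Face Trainer layout in which the per-step records sit under a "log_history" key; the checkpoint path below is illustrative only:

    import json

    # illustrative path to this checkpoint's state file
    with open("checkpoint-100/trainer_state.json") as f:
        state = json.load(f)

    # training records carry "loss"; evaluation records carry "eval_loss"
    train_logs = [e for e in state["log_history"] if "loss" in e]
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

    for e in train_logs:
        print(f"step {e['step']:4d}  loss {e['loss']:.4f}  grad_norm {e['grad_norm']:.2f}  lr {e['learning_rate']:.2e}")
    for e in eval_logs:
        print(f"eval @ step {e['step']}: eval_loss {e['eval_loss']:.4f}")

In this trace the training loss drops from about 8.5 at step 9 to roughly 0.32-0.38 by step 100, with brief grad-norm spikes around steps 15 and 85-86, and the single evaluation at step 100 reports eval_loss ≈ 0.164 over 33.0 s (≈ 30.3 samples/s, ≈ 1.9 steps/s).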
    	
        training_args.bin
    ADDED
    
    | @@ -0,0 +1,3 @@ | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            version https://git-lfs.github.com/spec/v1
         | 
| 2 | 
            +
            oid sha256:6508d83f63ab1198e30dbd0ff3243ef9c7492121e98c91692ed234e29d5c5577
         | 
| 3 | 
            +
            size 7288
         | 
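The entry above is only a Git LFS pointer; in the usual Trainer layout the object it references is a pickled transformers.TrainingArguments instance written with torch.save. A minimal sketch for inspecting it once downloaded (recent PyTorch releases need weights_only=False to unpickle non-tensor objects; the path is illustrative):

    import torch

    # illustrative path; the real file is fetched through Git LFS
    args = torch.load("checkpoint-100/training_args.bin", weights_only=False)

    print(args.num_train_epochs)             # expected to agree with "num_train_epochs": 5 above
    print(args.save_steps)                   # expected to agree with "save_steps": 100 above
    print(args.logging_steps)                # expected to agree with "logging_steps": 1 above
    print(args.per_device_train_batch_size)  # typically the value behind "train_batch_size": 4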
    	
        zero_to_fp32.py
    ADDED
    
    | @@ -0,0 +1,760 @@ | |
| 1 | 
            +
            #!/usr/bin/env python
         | 
| 2 | 
            +
             | 
| 3 | 
            +
            # Copyright (c) Microsoft Corporation.
         | 
| 4 | 
            +
            # SPDX-License-Identifier: Apache-2.0
         | 
| 5 | 
            +
             | 
| 6 | 
            +
            # DeepSpeed Team
         | 
| 7 | 
            +
             | 
| 8 | 
            +
            # This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
         | 
| 9 | 
            +
            # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
         | 
| 10 | 
            +
            # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
         | 
| 11 | 
            +
            # application.
         | 
| 12 | 
            +
            #
         | 
| 13 | 
            +
            # example:
         | 
| 14 | 
            +
            #   python zero_to_fp32.py . output_dir/
         | 
| 15 | 
            +
            #   or
         | 
| 16 | 
            +
            #   python zero_to_fp32.py . output_dir/ --safe_serialization
         | 
| 17 | 
            +
             | 
| 18 | 
            +
            import argparse
         | 
| 19 | 
            +
            import torch
         | 
| 20 | 
            +
            import glob
         | 
| 21 | 
            +
            import math
         | 
| 22 | 
            +
            import os
         | 
| 23 | 
            +
            import re
         | 
| 24 | 
            +
            import gc
         | 
| 25 | 
            +
            import json
         | 
| 26 | 
            +
            import numpy as np
         | 
| 27 | 
            +
            from tqdm import tqdm
         | 
| 28 | 
            +
            from collections import OrderedDict
         | 
| 29 | 
            +
            from dataclasses import dataclass
         | 
| 30 | 
            +
             | 
| 31 | 
            +
            # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
         | 
| 32 | 
            +
            # DeepSpeed data structures it has to be available in the current python environment.
         | 
| 33 | 
            +
            from deepspeed.utils import logger
         | 
| 34 | 
            +
            from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
         | 
| 35 | 
            +
                                                        FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
         | 
| 36 | 
            +
                                                        FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
         | 
| 37 | 
            +
             | 
| 38 | 
            +
             | 
| 39 | 
            +
            @dataclass
         | 
| 40 | 
            +
            class zero_model_state:
         | 
| 41 | 
            +
                buffers: dict()
         | 
| 42 | 
            +
                param_shapes: dict()
         | 
| 43 | 
            +
                shared_params: list
         | 
| 44 | 
            +
                ds_version: int
         | 
| 45 | 
            +
                frozen_param_shapes: dict()
         | 
| 46 | 
            +
                frozen_param_fragments: dict()
         | 
| 47 | 
            +
             | 
| 48 | 
            +
             | 
| 49 | 
            +
            debug = 0
         | 
| 50 | 
            +
             | 
| 51 | 
            +
            # load to cpu
         | 
| 52 | 
            +
            device = torch.device('cpu')
         | 
| 53 | 
            +
             | 
| 54 | 
            +
             | 
| 55 | 
            +
            def atoi(text):
         | 
| 56 | 
            +
                return int(text) if text.isdigit() else text
         | 
| 57 | 
            +
             | 
| 58 | 
            +
             | 
| 59 | 
            +
            def natural_keys(text):
         | 
| 60 | 
            +
                '''
         | 
| 61 | 
            +
                alist.sort(key=natural_keys) sorts in human order
         | 
| 62 | 
            +
                http://nedbatchelder.com/blog/200712/human_sorting.html
         | 
| 63 | 
            +
                (See Toothy's implementation in the comments)
         | 
| 64 | 
            +
                '''
         | 
| 65 | 
            +
                return [atoi(c) for c in re.split(r'(\d+)', text)]
         | 
| 66 | 
            +
             | 
| 67 | 
            +
             | 
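As an aside (not part of the script), the two helpers above implement a natural sort so that shard indices order numerically rather than lexicographically; a small illustration with hypothetical file names:

    files = ["zero_pp_rank_10_mp_rank_00_optim_states.pt",
             "zero_pp_rank_2_mp_rank_00_optim_states.pt"]
    print(sorted(files))                     # plain sort: rank_10 comes before rank_2
    print(sorted(files, key=natural_keys))   # natural sort: rank_2 comes before rank_10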
def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")

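# Illustrative sketch (exact filenames depend on the DeepSpeed config; treat the
# names as representative only): a ZeRO-3 tag folder for world_size=2 typically
# holds one model-states and one optim-states shard per rank, which the two
# helpers above glob and sort, e.g.
#     zero_pp_rank_0_mp_rank_00_model_states.pt
#     zero_pp_rank_1_mp_rank_00_model_states.pt
#     zero_pp_rank_0_mp_rank_00_optim_states.pt
#     zero_pp_rank_1_mp_rank_00_optim_states.pt
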
def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states

def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    return zero_stage, world_size, fp32_flat_groups

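# Illustrative note (the numbers are made up): PARTITION_COUNT is usually a
# plain int, but for ZeRO-2 with expert/MoE parameter groups it can be a
# per-group list such as [8, 2]; max([8, 2]) == 8 then recovers the
# data-parallel world size, which must equal the number of *_optim_states.pt
# shards found above.
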
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)

def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)

def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned  offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")

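# Informal sketch of the zero-2 reconstruction above (inferred from the code,
# since the "Reconstruction protocol" comment is still a TODO upstream): each
# rank stores one flat fp32 partition per optimizer param group; concatenating
# the ranks' partitions restores the full flat group, and every parameter is
# then sliced out in order via narrow(0, offset, numel).view(shape), with the
# trailing nccl-friendly padding absorbed by the zero2_align() check.
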
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict

def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

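# Worked example (illustrative): with unpartitioned_numel=10 and world_size=4,
# remainder=2, so padding_numel=2 and partitioned_numel=ceil(10/4)=3; each of
# the 4 ranks holds 3 elements (12 in total), 2 of which are padding.
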
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")

class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        self.flat_groups = flat_groups
        self.flat_groups_offset = flat_groups_offset
        self.offset = offset
        self.partitioned_numel = partitioned_numel
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param

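# Usage sketch (illustrative; the parameter name below is hypothetical): in lazy
# mode the zero-3 trainable entries of the reconstructed state_dict are
# GatheredTensor objects, and it is the .contiguous() call that actually
# concatenates the per-rank slices into a real torch.Tensor, e.g.
#     weight = state_dict["model.embed_tokens.weight"].contiguous()
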
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")

def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict

def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Convert state_dict of GatheredTensor to torch tensor
    """
    torch_state_dict = {}
    converted_tensors = {}
    for name, tensor in state_dict.items():
        tensor_id = id(tensor)
        if tensor_id in converted_tensors:  # shared tensors
            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
            torch_state_dict[name] = shared_tensor
        else:
            converted_tensors[tensor_id] = name
            if return_empty_tensor:
                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
            else:
                torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict

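# Illustrative note: because the cache above is keyed on id(tensor), tied
# weights that alias the same GatheredTensor are materialized only once, and
# the duplicate names end up sharing a single torch.Tensor in the returned dict.
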
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
          Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load the state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous()  # to cpu
            print(name, tensor)
            # del tensor to release memory if it is no longer in use
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        return state_dict
    else:
        return to_torch_tensor(state_dict)

def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

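# Usage sketch (illustrative; the paths are placeholders):
#     convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-100",
#                                                "path/checkpoint-100-fp32",
#                                                safe_serialization=True)
# would write model*.safetensors shard(s), plus model.safetensors.index.json
# when the result is sharded, into the output directory.
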
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory for the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
        "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB so that models can run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
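
For reference, a minimal sketch of how the converter above might be driven, either from the command line or programmatically. The input/output paths below are placeholders (not files in this upload), and the keyword arguments simply mirror the defaults parsed in the __main__ block:

    # Assumed CLI invocation with placeholder paths:
    #   python zero_to_fp32.py path/checkpoint-100 path/checkpoint-100-fp32 --safe_serialization
    #
    # The same conversion invoked programmatically (assumes zero_to_fp32.py is importable):
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(
        "path/checkpoint-100",        # placeholder: folder containing the `latest` file and the global_step* tag-folder
        "path/checkpoint-100-fp32",   # placeholder: where the consolidated fp32 weights are written
        max_shard_size="5GB",         # same default as the --max_shard_size flag
        safe_serialization=True,      # write `safetensors` shards instead of pickle-based .bin files
        tag=None,                     # resolve the tag from the `latest` file
        exclude_frozen_parameters=False)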
