Training in progress, step 100
- chat_template.jinja +5 -0
- config.json +60 -0
- model-00001-of-00007.safetensors +3 -0
- model-00002-of-00007.safetensors +3 -0
- model-00003-of-00007.safetensors +3 -0
- model-00004-of-00007.safetensors +3 -0
- model-00005-of-00007.safetensors +3 -0
- model-00006-of-00007.safetensors +3 -0
- model-00007-of-00007.safetensors +3 -0
- model.safetensors.index.json +0 -0
- special_tokens_map.json +17 -0
- tokenizer.json +0 -0
- tokenizer_config.json +163 -0
- training.log +711 -0
- training_args.bin +3 -0
    	
        chat_template.jinja
    ADDED
    
@@ -0,0 +1,5 @@
+{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '
+
+' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '
+
+' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}
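
This template prepends the BOS token, formats conversations as "User: ..." / "Assistant: ..." turns separated by blank lines, and appends "Assistant:" when a generation prompt is requested. Below is a minimal rendering sketch, not part of this commit: it rebuilds the template in plain jinja2 purely for illustration (tokenizer.apply_chat_template normally applies it for you), with the token strings taken from special_tokens_map.json further down.

from jinja2 import Template

# Rebuild the 5-line template above as one string; '\n' stands for the literal
# newlines that the file stores inside the quoted strings.
chat_template = (
    "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}"
    "{{ bos_token }}{% for message in messages %}"
    "{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}"
    "{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}"
    "{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}"
    "{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

print(Template(chat_template).render(
    messages=messages,
    bos_token="<|begin▁of▁sentence|>",
    eos_token="<|end▁of▁sentence|>",
    add_generation_prompt=True,
))
# <|begin▁of▁sentence|>You are a helpful assistant.
#
# User: What is 2 + 2?
#
# Assistant:
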
    	
        config.json
    ADDED
    
@@ -0,0 +1,60 @@
+{
+  "architectures": [
+    "DeepseekV2ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--configuration_deepseek.DeepseekV2Config",
+    "AutoModel": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2Model",
+    "AutoModelForCausalLM": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM"
+  },
+  "aux_loss_alpha": 0.001,
+  "bos_token_id": 100000,
+  "eos_token_id": 100001,
+  "ep_size": 1,
+  "first_k_dense_replace": 1,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 10944,
+  "kv_lora_rank": 512,
+  "max_position_embeddings": 163840,
+  "model_type": "deepseek_v2",
+  "moe_intermediate_size": 1408,
+  "moe_layer_freq": 1,
+  "n_group": 1,
+  "n_routed_experts": 64,
+  "n_shared_experts": 2,
+  "norm_topk_prob": false,
+  "num_attention_heads": 16,
+  "num_experts_per_tok": 6,
+  "num_hidden_layers": 27,
+  "num_key_value_heads": 16,
+  "pretraining_tp": 1,
+  "q_lora_rank": null,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "beta_fast": 32,
+    "beta_slow": 1,
+    "factor": 40,
+    "mscale": 0.707,
+    "mscale_all_dim": 0.707,
+    "original_max_position_embeddings": 4096,
+    "type": "yarn"
+  },
+  "rope_theta": 10000,
+  "routed_scaling_factor": 1.0,
+  "scoring_func": "softmax",
+  "seq_aux": true,
+  "tie_word_embeddings": false,
+  "topk_group": 1,
+  "topk_method": "greedy",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.0.dev0",
+  "use_cache": true,
+  "v_head_dim": 128,
+  "vocab_size": 102400
+}
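
The config describes a 27-layer DeepseekV2 MoE model (hidden size 2048, 64 routed plus 2 shared experts, 6 experts per token) with YaRN rope scaling from 4096 to 163840 positions, and auto_map routes loading to deepseek-ai's custom modeling code. A minimal sketch of reading it back, assuming transformers is installed and either the base model id or the local checkpoint directory is used:

from transformers import AutoConfig

# trust_remote_code is needed because auto_map points at the custom
# DeepseekV2 configuration/modeling files rather than a built-in class.
config = AutoConfig.from_pretrained(
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",  # or the local checkpoint directory
    trust_remote_code=True,
)
print(config.hidden_size)           # 2048
print(config.num_hidden_layers)     # 27
print(config.n_routed_experts)      # 64
print(config.rope_scaling["type"])  # yarn
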
    	
        model-00001-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bd14d78cb25bbeb4431999560a13af797d439942066f2011e6093767eae7595
+size 4994763632
    	
        model-00002-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd3655b30003a3beb9becd5a2a2ed0cc0b4a25b34d91e95fc4dd4a82b3c3a44f
+size 4995044944
    	
        model-00003-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48bda563dd6cb17ae86953452044f81f91340c8c869770bce766a6bfb5127f28
+size 4996085000
    	
        model-00004-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef18b735257307e675a4f71d98be91d1de566b9d6d041a834159c209602ca181
+size 4996085224
    	
        model-00005-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac08c02a049dbf1f8608f79bec22ec879c241017da163f445224b906dcf6e8a9
+size 4996085224
    	
        model-00006-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c774cf10ba1047ad9b878bf6d22bcf3a3479e6e76888676499992c37e9e0e5f
+size 4995045792
    	
        model-00007-of-00007.safetensors
    ADDED
    
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eee32011d40e90a8bf70a85c01cfd9c31b8f48e0a8c060c5c0cbb2159e11b800
+size 1440515736
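
The seven .safetensors entries above are Git LFS pointer files: the repository diff stores only the spec version line, the sha256 object id, and the byte size, while the actual weights live in LFS storage. Below is a hedged sketch of checking a downloaded shard against the recorded oid; "<user>/<repo>" is a placeholder, since only the hub_model_id fragment appears in the training log further down.

import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("<user>/<repo>", "model-00001-of-00007.safetensors")  # placeholder repo id

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Should match the oid recorded in the LFS pointer for shard 1 above.
print(sha256.hexdigest() == "7bd14d78cb25bbeb4431999560a13af797d439942066f2011e6093767eae7595")
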
    	
        model.safetensors.index.json
    ADDED
    
The diff for this file is too large to render.
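
Although the diff is not rendered, model.safetensors.index.json follows the standard sharded-checkpoint layout: a metadata block with the total size and a weight_map from each parameter name to the shard file that stores it. A small inspection sketch; the lm_head entry and the shard named in the comment are illustrative assumptions, not values taken from this commit.

import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total bytes across the 7 shards
print(index["weight_map"]["lm_head.weight"])  # e.g. "model-00007-of-00007.safetensors"
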
    	
        special_tokens_map.json
    ADDED
    
@@ -0,0 +1,17 @@
+{
+  "bos_token": {
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|end▁of▁sentence|>"
+}
    	
        tokenizer.json
    ADDED
    
The diff for this file is too large to render.
    	
        tokenizer_config.json
    ADDED
    
@@ -0,0 +1,163 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": null,
+  "added_tokens_decoder": {
+    "100000": {
+      "content": "<|begin▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100001": {
+      "content": "<|end▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100002": {
+      "content": "<|fim▁hole|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100003": {
+      "content": "<|fim▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100004": {
+      "content": "<|fim▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100005": {
+      "content": "<|completion|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100006": {
+      "content": "<|User|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100007": {
+      "content": "<|Assistant|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100008": {
+      "content": "<|EOT|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100009": {
+      "content": "<|tool▁calls▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100010": {
+      "content": "<|tool▁calls▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100011": {
+      "content": "<|tool▁call▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100012": {
+      "content": "<|tool▁call▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100013": {
+      "content": "<|tool▁outputs▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100014": {
+      "content": "<|tool▁outputs▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100015": {
+      "content": "<|tool▁output▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100016": {
+      "content": "<|tool▁output▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "100017": {
+      "content": "<|tool▁sep|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "bos_token": "<|begin▁of▁sentence|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|end▁of▁sentence|>",
+  "extra_special_tokens": {},
+  "fast_tokenizer": true,
+  "legacy": true,
+  "model_max_length": 16384,
+  "pad_token": "<|end▁of▁sentence|>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizerFast",
+  "unk_token": null,
+  "use_default_system_prompt": false
+}
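
The tokenizer is a LlamaTokenizerFast with model_max_length 16384; ids 100000-100017 register the BOS/EOS, FIM, role, and tool-use markers, and the pad token reuses EOS, matching special_tokens_map.json above. A minimal loading sketch, assuming the base model id (or the checkpoint directory) is used:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct")
print(tokenizer.bos_token)         # <|begin▁of▁sentence|> (id 100000)
print(tokenizer.eos_token)         # <|end▁of▁sentence|>   (id 100001)
print(tokenizer.pad_token)         # <|end▁of▁sentence|>   (pad reuses EOS)
print(tokenizer.model_max_length)  # 16384
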
    	
        training.log
    ADDED
    
@@ -0,0 +1,711 @@
+2025-07-09 02:58:07 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+2025-07-09 02:58:07 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+2025-07-09 02:58:07 - INFO - __main__ - Training parameters EfficientDistillationConfig(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+alpha=0.5,
+auto_find_batch_size=False,
+average_tokens_across_devices=False,
+batch_eval_metrics=False,
+benchmarks=[],
+bf16=True,
+bf16_full_eval=False,
+callbacks=[],
+ce_loss_scale=1.0,
+chars_per_token=<CHARS_PER_TOKEN>,
+chat_template=None,
+completion_only_loss=None,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+dataset_batch_size=None,
+dataset_kwargs=None,
+dataset_num_proc=None,
+dataset_text_field=text,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800000000,
+debug=[],
+deepspeed=None,
+disable_dropout=True,
+disable_tqdm=False,
+do_eval=True,
+do_predict=False,
+do_train=False,
+eos_token=<EOS_TOKEN>,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_packing=None,
+eval_steps=None,
+eval_strategy=IntervalStrategy.NO,
+eval_use_gather_object=False,
+expert_num=6,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=False,
+gradient_checkpointing_kwargs={'use_reentrant': False},
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-diff-info-Distill-token-specific-scale,
+hub_model_revision=main,
+hub_private_repo=None,
+hub_strategy=HubStrategy.EVERY_SAVE,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_for_metrics=[],
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+kl_loss_scale=1.0,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=1e-05,
+length_column_name=length,
+lmbda=0.0,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=info,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale/runs/Jul09_02-58-05_haozeh-dev-pod-7,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=1,
+logging_strategy=IntervalStrategy.STEPS,
+loss_type=token_specific,
+lr_scheduler_kwargs={'min_lr_rate': 0.1},
+lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+max_grad_norm=1.0,
+max_length=8192,
+max_new_tokens=1024,
+max_seq_length=None,
+max_steps=-1,
+metric_for_best_model=None,
+model_init_kwargs=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_of_sequences=None,
+num_train_epochs=3,
+optim=OptimizerNames.ADAMW_TORCH,
+optim_args=None,
+optim_target_modules=None,
+output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
+overwrite_hub_revision=False,
+overwrite_output_dir=True,
+packing=False,
+pad_to_multiple_of=None,
+pad_token=<PAD_TOKEN>,
+padding_free=False,
+past_index=-1,
+per_device_eval_batch_size=16,
+per_device_train_batch_size=4,
+prediction_loss_only=False,
+push_to_hub=True,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_revision=False,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+reduction=sum,
+remove_unused_columns=True,
+report_to=['wandb'],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=SaveStrategy.STEPS,
+save_total_limit=1,
+seed=1234,
+skip_memory_metrics=True,
+system_prompt=None,
+teacher_model_init_kwargs=None,
+teacher_model_name_or_path=None,
+temperature=0.9,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torch_empty_cache_steps=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_liger=False,
+use_liger_kernel=False,
+use_mps_device=False,
+wandb_entity=None,
+wandb_project=None,
+warmup_ratio=0.1,
+warmup_steps=0,
+weight_decay=0.0,
+)
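
The parameter dump above sets alpha=0.5, ce_loss_scale, kl_loss_scale, temperature=0.9, and reduction=sum for the distillation objective. The EfficientDistillationTrainer and its token_specific loss are not included in this commit, so the following is only a generic, assumption-laden sketch of how a cross-entropy plus KL objective could combine those scales; it is not the repository's actual implementation.

import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels,
                      alpha=0.5, ce_loss_scale=1.0, kl_loss_scale=1.0, temperature=0.9):
    # Hard-label cross-entropy (label shifting/masking details omitted).
    ce = F.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,
        reduction="sum",
    )
    # KL divergence between temperature-softened teacher and student distributions.
    kl = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="sum",
    ) * temperature**2
    return alpha * ce_loss_scale * ce + (1 - alpha) * kl_loss_scale * kl
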
+2025-07-09 02:58:26 - INFO - __main__ - *** Initializing model kwargs ***
+2025-07-09 02:58:26 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
+Memory reserved: 0.0
+2025-07-09 03:00:48 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625
+Memory reserved: 7322.0
+2025-07-09 03:00:48 - INFO - __main__ - MoE layers replaced with Dense MLP layers
+2025-07-09 03:00:48 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 4836.39697265625
+Memory reserved: 6442.0
+2025-07-09 03:00:48 - INFO - __main__ - Initializing EfficientDistillationTrainer...
+2025-07-09 03:01:20 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375
+Memory reserved: 12800.0
+2025-07-09 03:01:20 - INFO - __main__ - *** Starting training ***
+2025-07-09 03:01:20 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
+  (model): DeepseekV2Model(
+    (embed_tokens): Embedding(102400, 2048)
+    (layers): ModuleList(
+      (0): DeepseekV2DecoderLayer(
+        (self_attn): DeepseekV2FlashAttention2(
+          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+          (kv_a_layernorm): DeepseekV2RMSNorm()
+          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+        )
+        (mlp): DeepseekV2MLP(
+          (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
+          (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
+          (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
+          (act_fn): SiLU()
+        )
+        (input_layernorm): DeepseekV2RMSNorm()
+        (post_attention_layernorm): DeepseekV2RMSNorm()
+      )
+      (1-26): 26 x DeepseekV2DecoderLayer(
+        (self_attn): DeepseekV2FlashAttention2(
+          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+          (kv_a_layernorm): DeepseekV2RMSNorm()
+          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+        )
+        (mlp): DeepseekV2MoE(
+          (experts): ModuleList(
+            (0-63): 64 x DeepseekV2MLP(
+              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
+              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
+              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
+              (act_fn): SiLU()
+            )
+          )
+          (gate): MoEGate()
+          (shared_experts): DeepseekV2MLP(
+            (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
+            (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
+            (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
+            (act_fn): SiLU()
+          )
+        )
+        (input_layernorm): DeepseekV2RMSNorm()
+        (post_attention_layernorm): DeepseekV2RMSNorm()
+      )
+    )
+    (norm): DeepseekV2RMSNorm()
+  )
+  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
+)
+2025-07-09 03:28:44 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+2025-07-09 03:28:44 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+2025-07-09 03:28:44 - INFO - __main__ - Training parameters EfficientDistillationConfig(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+alpha=0.5,
+auto_find_batch_size=False,
+average_tokens_across_devices=False,
+batch_eval_metrics=False,
+benchmarks=[],
+bf16=True,
+bf16_full_eval=False,
+callbacks=[],
+ce_loss_scale=2.0,
+chars_per_token=<CHARS_PER_TOKEN>,
+chat_template=None,
+completion_only_loss=None,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+dataset_batch_size=None,
+dataset_kwargs=None,
+dataset_num_proc=None,
+dataset_text_field=text,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800000000,
+debug=[],
+deepspeed=None,
+disable_dropout=True,
+disable_tqdm=False,
+do_eval=True,
+do_predict=False,
+do_train=False,
+eos_token=<EOS_TOKEN>,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_packing=None,
+eval_steps=None,
+eval_strategy=IntervalStrategy.NO,
+eval_use_gather_object=False,
+expert_num=6,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=False,
+gradient_checkpointing_kwargs={'use_reentrant': False},
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-diff-info-Distill-token-specific-scale,
+hub_model_revision=main,
+hub_private_repo=None,
+hub_strategy=HubStrategy.EVERY_SAVE,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_for_metrics=[],
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+kl_loss_scale=1.0,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=1e-05,
+length_column_name=length,
+lmbda=0.0,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=info,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale/runs/Jul09_03-28-42_haozeh-dev-pod-7,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=1,
+logging_strategy=IntervalStrategy.STEPS,
+loss_type=token_specific,
+lr_scheduler_kwargs={'min_lr_rate': 0.1},
+lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+max_grad_norm=1.0,
+max_length=8192,
+max_new_tokens=1024,
+max_seq_length=None,
+max_steps=-1,
+metric_for_best_model=None,
+model_init_kwargs=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_of_sequences=None,
+num_train_epochs=3,
+optim=OptimizerNames.ADAMW_TORCH,
+optim_args=None,
+optim_target_modules=None,
+output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
+overwrite_hub_revision=False,
+overwrite_output_dir=True,
+packing=False,
+pad_to_multiple_of=None,
+pad_token=<PAD_TOKEN>,
+padding_free=False,
+past_index=-1,
+per_device_eval_batch_size=16,
+per_device_train_batch_size=16,
+prediction_loss_only=False,
+push_to_hub=True,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_revision=False,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+reduction=sum,
+remove_unused_columns=True,
+report_to=['wandb'],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=SaveStrategy.STEPS,
+save_total_limit=1,
+seed=1234,
+skip_memory_metrics=True,
+system_prompt=None,
+teacher_model_init_kwargs=None,
+teacher_model_name_or_path=None,
+temperature=0.9,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torch_empty_cache_steps=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
         | 
| 394 | 
            +
            tpu_num_cores=None,
         | 
| 395 | 
            +
            use_cpu=False,
         | 
| 396 | 
            +
            use_ipex=False,
         | 
| 397 | 
            +
            use_legacy_prediction_loop=False,
         | 
| 398 | 
            +
            use_liger=False,
         | 
| 399 | 
            +
            use_liger_kernel=False,
         | 
| 400 | 
            +
            use_mps_device=False,
         | 
| 401 | 
            +
            wandb_entity=None,
         | 
| 402 | 
            +
            wandb_project=None,
         | 
| 403 | 
            +
            warmup_ratio=0.1,
         | 
| 404 | 
            +
            warmup_steps=0,
         | 
| 405 | 
            +
            weight_decay=0.0,
         | 
| 406 | 
            +
            )
         | 
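The EfficientDistillationConfig dumped above is a custom extension of the usual Transformers/TRL training arguments; the fields loss_type=token_specific, ce_loss_scale=2.0, kl_loss_scale=1.0, alpha=0.5, temperature=0.9 and reduction=sum suggest a per-token blend of a cross-entropy term and a KL distillation term. The actual trainer code is not part of this commit, so the snippet below is only a hedged sketch of how such a token-weighted combination is commonly written; every name in it (token_specific_distill_loss, student_logits, teacher_logits) is an illustrative assumption, not taken from the training script.

# Hedged sketch only: the real EfficientDistillationTrainer is not in this repo.
# Assumed form: per-token CE + KL(teacher || student), blended with the scales logged above.
import torch
import torch.nn.functional as F

def token_specific_distill_loss(student_logits, teacher_logits, labels,
                                ce_loss_scale=2.0, kl_loss_scale=1.0,
                                temperature=0.9, reduction="sum"):
    # Cross-entropy against the ground-truth labels, one value per token.
    ce = F.cross_entropy(student_logits.transpose(1, 2), labels,
                         ignore_index=-100, reduction="none")
    # KL between temperature-softened teacher and student distributions.
    s_logp = F.log_softmax(student_logits / temperature, dim=-1)
    t_prob = F.softmax(teacher_logits / temperature, dim=-1)
    kl = (t_prob * (t_prob.clamp_min(1e-9).log() - s_logp)).sum(-1) * temperature ** 2
    mask = (labels != -100).float()
    per_token = ce_loss_scale * ce + kl_loss_scale * kl
    loss = (per_token * mask).sum()
    return loss if reduction == "sum" else loss / mask.sum().clamp_min(1.0)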
| 407 | 
            +
            2025-07-09 03:28:53 - INFO - __main__ - *** Initializing model kwargs ***
         | 
| 408 | 
            +
            2025-07-09 03:28:53 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
         | 
| 409 | 
            +
            Memory reserved: 0.0
         | 
| 410 | 
            +
            2025-07-09 03:30:07 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625
         | 
| 411 | 
            +
            Memory reserved: 7322.0
         | 
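The "Memory allocated / Memory reserved" pairs in this log are consistent with torch.cuda statistics expressed in MiB. A minimal sketch of a helper that would produce lines in this format (the helper name is an assumption, not taken from the training script):

# Hedged sketch: reproduces the formatting of the memory lines above,
# assuming they come from torch.cuda allocator statistics converted to MiB.
import torch

def log_cuda_memory(tag):
    allocated = torch.cuda.memory_allocated() / 1024 ** 2   # MiB currently allocated
    reserved = torch.cuda.memory_reserved() / 1024 ** 2     # MiB held by the caching allocator
    print(f"{tag}:Memory allocated: {allocated}\nMemory reserved: {reserved}")

# e.g. log_cuda_memory("Model memory after loading model")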
| 412 | 
            +
            2025-07-09 03:30:08 - INFO - __main__ - MoE layers replaced with Dense MLP layers
         | 
| 413 | 
            +
            2025-07-09 03:30:08 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 4836.39697265625
         | 
| 414 | 
            +
            Memory reserved: 6442.0
         | 
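"MoE layers replaced with Dense MLP layers" indicates that the DeepseekV2MoE blocks are swapped out for plain DeepseekV2MLP modules before distillation; the replacement code itself is not part of this commit. The loop below is only an illustrative sketch of that pattern, with module names taken from the architecture printout further down; which dense MLP is substituted (the MoE's shared_experts, a fresh module, an averaged expert, ...) is an assumption here.

# Hedged sketch: walk the decoder layers and replace each MoE block with a dense MLP.
# The choice of replacement module is an assumption; the log only states that the
# replacement happened.
def replace_moe_with_dense(model):
    for layer in model.model.layers:
        mlp = layer.mlp
        if type(mlp).__name__ == "DeepseekV2MoE":
            # Reuse the always-active shared expert path as the dense substitute.
            layer.mlp = mlp.shared_experts
    return model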
| 415 | 
            +
            2025-07-09 03:30:08 - INFO - __main__ - Initializing EfficientDistillationTrainer...
         | 
| 416 | 
            +
            2025-07-09 03:30:30 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375
         | 
| 417 | 
            +
            Memory reserved: 12800.0
         | 
| 418 | 
            +
            2025-07-09 03:30:30 - INFO - __main__ - *** Starting training ***
         | 
| 419 | 
            +
            2025-07-09 03:30:30 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
         | 
| 420 | 
            +
              (model): DeepseekV2Model(
         | 
| 421 | 
            +
                (embed_tokens): Embedding(102400, 2048)
         | 
| 422 | 
            +
                (layers): ModuleList(
         | 
| 423 | 
            +
                  (0): DeepseekV2DecoderLayer(
         | 
| 424 | 
            +
                    (self_attn): DeepseekV2FlashAttention2(
         | 
| 425 | 
            +
                      (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
         | 
| 426 | 
            +
                      (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
         | 
| 427 | 
            +
                      (kv_a_layernorm): DeepseekV2RMSNorm()
         | 
| 428 | 
            +
                      (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
         | 
| 429 | 
            +
                      (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
         | 
| 430 | 
            +
                      (rotary_emb): DeepseekV2YarnRotaryEmbedding()
         | 
| 431 | 
            +
                    )
         | 
| 432 | 
            +
                    (mlp): DeepseekV2MLP(
         | 
| 433 | 
            +
                      (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
         | 
| 434 | 
            +
                      (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
         | 
| 435 | 
            +
                      (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
         | 
| 436 | 
            +
                      (act_fn): SiLU()
         | 
| 437 | 
            +
                    )
         | 
| 438 | 
            +
                    (input_layernorm): DeepseekV2RMSNorm()
         | 
| 439 | 
            +
                    (post_attention_layernorm): DeepseekV2RMSNorm()
         | 
| 440 | 
            +
                  )
         | 
| 441 | 
            +
                  (1-26): 26 x DeepseekV2DecoderLayer(
         | 
| 442 | 
            +
                    (self_attn): DeepseekV2FlashAttention2(
         | 
| 443 | 
            +
                      (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
         | 
| 444 | 
            +
                      (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
         | 
| 445 | 
            +
                      (kv_a_layernorm): DeepseekV2RMSNorm()
         | 
| 446 | 
            +
                      (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
         | 
| 447 | 
            +
                      (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
         | 
| 448 | 
            +
                      (rotary_emb): DeepseekV2YarnRotaryEmbedding()
         | 
| 449 | 
            +
                    )
         | 
| 450 | 
            +
                    (mlp): DeepseekV2MoE(
         | 
| 451 | 
            +
                      (experts): ModuleList(
         | 
| 452 | 
            +
                        (0-63): 64 x DeepseekV2MLP(
         | 
| 453 | 
            +
                          (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
         | 
| 454 | 
            +
                          (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
         | 
| 455 | 
            +
                          (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
         | 
| 456 | 
            +
                          (act_fn): SiLU()
         | 
| 457 | 
            +
                        )
         | 
| 458 | 
            +
                      )
         | 
| 459 | 
            +
                      (gate): MoEGate()
         | 
| 460 | 
            +
                      (shared_experts): DeepseekV2MLP(
         | 
| 461 | 
            +
                        (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
         | 
| 462 | 
            +
                        (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
         | 
| 463 | 
            +
                        (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
         | 
| 464 | 
            +
                        (act_fn): SiLU()
         | 
| 465 | 
            +
                      )
         | 
| 466 | 
            +
                    )
         | 
| 467 | 
            +
                    (input_layernorm): DeepseekV2RMSNorm()
         | 
| 468 | 
            +
                    (post_attention_layernorm): DeepseekV2RMSNorm()
         | 
| 469 | 
            +
                  )
         | 
| 470 | 
            +
                )
         | 
| 471 | 
            +
                (norm): DeepseekV2RMSNorm()
         | 
| 472 | 
            +
              )
         | 
| 473 | 
            +
              (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
         | 
| 474 | 
            +
            )
         | 
| 475 | 
            +
            2025-07-09 06:55:32 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
         | 
| 476 | 
            +
            2025-07-09 06:55:32 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
         | 
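The ModelConfig and ScriptArguments lines above pin down how the student model and dataset are loaded: bfloat16 weights, remote DeepseekV2 modeling code, FlashAttention-2, and the train split of lmms-lab/Math10K. A minimal, hedged reconstruction of that loading step using standard transformers/datasets calls (not the actual training script):

# Hedged sketch of the loading step implied by the ModelConfig / ScriptArguments lines above.
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision="main",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",
)
dataset = load_dataset("lmms-lab/Math10K", split="train")
print(model)  # yields an architecture dump like the one logged below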
| 477 | 
            +
            2025-07-09 06:55:32 - INFO - __main__ - Training parameters EfficientDistillationConfig(
         | 
| 478 | 
            +
            _n_gpu=1,
         | 
| 479 | 
            +
            accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
         | 
| 480 | 
            +
            adafactor=False,
         | 
| 481 | 
            +
            adam_beta1=0.9,
         | 
| 482 | 
            +
            adam_beta2=0.999,
         | 
| 483 | 
            +
            adam_epsilon=1e-08,
         | 
| 484 | 
            +
            alpha=0.5,
         | 
| 485 | 
            +
            auto_find_batch_size=False,
         | 
| 486 | 
            +
            average_tokens_across_devices=False,
         | 
| 487 | 
            +
            batch_eval_metrics=False,
         | 
| 488 | 
            +
            benchmarks=[],
         | 
| 489 | 
            +
            bf16=True,
         | 
| 490 | 
            +
            bf16_full_eval=False,
         | 
| 491 | 
            +
            callbacks=[],
         | 
| 492 | 
            +
            ce_loss_scale=2.0,
         | 
| 493 | 
            +
            chars_per_token=<CHARS_PER_TOKEN>,
         | 
| 494 | 
            +
            chat_template=None,
         | 
| 495 | 
            +
            completion_only_loss=None,
         | 
| 496 | 
            +
            data_seed=None,
         | 
| 497 | 
            +
            dataloader_drop_last=False,
         | 
| 498 | 
            +
            dataloader_num_workers=0,
         | 
| 499 | 
            +
            dataloader_persistent_workers=False,
         | 
| 500 | 
            +
            dataloader_pin_memory=True,
         | 
| 501 | 
            +
            dataloader_prefetch_factor=None,
         | 
| 502 | 
            +
            dataset_batch_size=None,
         | 
| 503 | 
            +
            dataset_kwargs=None,
         | 
| 504 | 
            +
            dataset_num_proc=None,
         | 
| 505 | 
            +
            dataset_text_field=text,
         | 
| 506 | 
            +
            ddp_backend=None,
         | 
| 507 | 
            +
            ddp_broadcast_buffers=None,
         | 
| 508 | 
            +
            ddp_bucket_cap_mb=None,
         | 
| 509 | 
            +
            ddp_find_unused_parameters=None,
         | 
| 510 | 
            +
            ddp_timeout=1800000000,
         | 
| 511 | 
            +
            debug=[],
         | 
| 512 | 
            +
            deepspeed=None,
         | 
| 513 | 
            +
            disable_dropout=True,
         | 
| 514 | 
            +
            disable_tqdm=False,
         | 
| 515 | 
            +
            do_eval=True,
         | 
| 516 | 
            +
            do_predict=False,
         | 
| 517 | 
            +
            do_train=False,
         | 
| 518 | 
            +
            eos_token=<EOS_TOKEN>,
         | 
| 519 | 
            +
            eval_accumulation_steps=None,
         | 
| 520 | 
            +
            eval_delay=0,
         | 
| 521 | 
            +
            eval_do_concat_batches=True,
         | 
| 522 | 
            +
            eval_on_start=False,
         | 
| 523 | 
            +
            eval_packing=None,
         | 
| 524 | 
            +
            eval_steps=None,
         | 
| 525 | 
            +
            eval_strategy=IntervalStrategy.NO,
         | 
| 526 | 
            +
            eval_use_gather_object=False,
         | 
| 527 | 
            +
            expert_num=6,
         | 
| 528 | 
            +
            fp16=False,
         | 
| 529 | 
            +
            fp16_backend=auto,
         | 
| 530 | 
            +
            fp16_full_eval=False,
         | 
| 531 | 
            +
            fp16_opt_level=O1,
         | 
| 532 | 
            +
            fsdp=[],
         | 
| 533 | 
            +
            fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
         | 
| 534 | 
            +
            fsdp_min_num_params=0,
         | 
| 535 | 
            +
            fsdp_transformer_layer_cls_to_wrap=None,
         | 
| 536 | 
            +
            full_determinism=False,
         | 
| 537 | 
            +
            gradient_accumulation_steps=1,
         | 
| 538 | 
            +
            gradient_checkpointing=False,
         | 
| 539 | 
            +
            gradient_checkpointing_kwargs={'use_reentrant': False},
         | 
| 540 | 
            +
            greater_is_better=None,
         | 
| 541 | 
            +
            group_by_length=False,
         | 
| 542 | 
            +
            half_precision_backend=auto,
         | 
| 543 | 
            +
            hub_always_push=False,
         | 
| 544 | 
            +
            hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-diff-info-Distill-token-specific-scale,
         | 
| 545 | 
            +
            hub_model_revision=main,
         | 
| 546 | 
            +
            hub_private_repo=None,
         | 
| 547 | 
            +
            hub_strategy=HubStrategy.EVERY_SAVE,
         | 
| 548 | 
            +
            hub_token=<HUB_TOKEN>,
         | 
| 549 | 
            +
            ignore_data_skip=False,
         | 
| 550 | 
            +
            include_for_metrics=[],
         | 
| 551 | 
            +
            include_inputs_for_metrics=False,
         | 
| 552 | 
            +
            include_num_input_tokens_seen=False,
         | 
| 553 | 
            +
            include_tokens_per_second=False,
         | 
| 554 | 
            +
            jit_mode_eval=False,
         | 
| 555 | 
            +
            kl_loss_scale=1.0,
         | 
| 556 | 
            +
            label_names=None,
         | 
| 557 | 
            +
            label_smoothing_factor=0.0,
         | 
| 558 | 
            +
            learning_rate=1e-05,
         | 
| 559 | 
            +
            length_column_name=length,
         | 
| 560 | 
            +
            lmbda=0.0,
         | 
| 561 | 
            +
            load_best_model_at_end=False,
         | 
| 562 | 
            +
            local_rank=0,
         | 
| 563 | 
            +
            log_level=info,
         | 
| 564 | 
            +
            log_level_replica=warning,
         | 
| 565 | 
            +
            log_on_each_node=True,
         | 
| 566 | 
            +
            logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale/runs/Jul09_06-55-31_haozeh-dev-pod-7,
         | 
| 567 | 
            +
            logging_first_step=False,
         | 
| 568 | 
            +
            logging_nan_inf_filter=True,
         | 
| 569 | 
            +
            logging_steps=1,
         | 
| 570 | 
            +
            logging_strategy=IntervalStrategy.STEPS,
         | 
| 571 | 
            +
            loss_type=token_specific,
         | 
| 572 | 
            +
            lr_scheduler_kwargs={'min_lr_rate': 0.1},
         | 
| 573 | 
            +
            lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
         | 
| 574 | 
            +
            max_grad_norm=1.0,
         | 
| 575 | 
            +
            max_length=8192,
         | 
| 576 | 
            +
            max_new_tokens=1024,
         | 
| 577 | 
            +
            max_seq_length=None,
         | 
| 578 | 
            +
            max_steps=-1,
         | 
| 579 | 
            +
            metric_for_best_model=None,
         | 
| 580 | 
            +
            model_init_kwargs=None,
         | 
| 581 | 
            +
            mp_parameters=,
         | 
| 582 | 
            +
            neftune_noise_alpha=None,
         | 
| 583 | 
            +
            no_cuda=False,
         | 
| 584 | 
            +
            num_of_sequences=None,
         | 
| 585 | 
            +
            num_train_epochs=3,
         | 
| 586 | 
            +
            optim=OptimizerNames.ADAMW_TORCH,
         | 
| 587 | 
            +
            optim_args=None,
         | 
| 588 | 
            +
            optim_target_modules=None,
         | 
| 589 | 
            +
            output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
         | 
| 590 | 
            +
            overwrite_hub_revision=False,
         | 
| 591 | 
            +
            overwrite_output_dir=True,
         | 
| 592 | 
            +
            packing=False,
         | 
| 593 | 
            +
            pad_to_multiple_of=None,
         | 
| 594 | 
            +
            pad_token=<PAD_TOKEN>,
         | 
| 595 | 
            +
            padding_free=False,
         | 
| 596 | 
            +
            past_index=-1,
         | 
| 597 | 
            +
            per_device_eval_batch_size=16,
         | 
| 598 | 
            +
            per_device_train_batch_size=8,
         | 
| 599 | 
            +
            prediction_loss_only=False,
         | 
| 600 | 
            +
            push_to_hub=True,
         | 
| 601 | 
            +
            push_to_hub_model_id=None,
         | 
| 602 | 
            +
            push_to_hub_organization=None,
         | 
| 603 | 
            +
            push_to_hub_revision=False,
         | 
| 604 | 
            +
            push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
         | 
| 605 | 
            +
            ray_scope=last,
         | 
| 606 | 
            +
            reduction=sum,
         | 
| 607 | 
            +
            remove_unused_columns=True,
         | 
| 608 | 
            +
            report_to=['wandb'],
         | 
| 609 | 
            +
            restore_callback_states_from_checkpoint=False,
         | 
| 610 | 
            +
            resume_from_checkpoint=None,
         | 
| 611 | 
            +
            run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/diff_info_distill_token_specific_scale,
         | 
| 612 | 
            +
            save_on_each_node=False,
         | 
| 613 | 
            +
            save_only_model=False,
         | 
| 614 | 
            +
            save_safetensors=True,
         | 
| 615 | 
            +
            save_steps=100,
         | 
| 616 | 
            +
            save_strategy=SaveStrategy.STEPS,
         | 
| 617 | 
            +
            save_total_limit=1,
         | 
| 618 | 
            +
            seed=1234,
         | 
| 619 | 
            +
            skip_memory_metrics=True,
         | 
| 620 | 
            +
            system_prompt=None,
         | 
| 621 | 
            +
            teacher_model_init_kwargs=None,
         | 
| 622 | 
            +
            teacher_model_name_or_path=None,
         | 
| 623 | 
            +
            temperature=0.9,
         | 
| 624 | 
            +
            tf32=None,
         | 
| 625 | 
            +
            torch_compile=False,
         | 
| 626 | 
            +
            torch_compile_backend=None,
         | 
| 627 | 
            +
            torch_compile_mode=None,
         | 
| 628 | 
            +
            torch_empty_cache_steps=None,
         | 
| 629 | 
            +
            torchdynamo=None,
         | 
| 630 | 
            +
            tpu_metrics_debug=False,
         | 
| 631 | 
            +
            tpu_num_cores=None,
         | 
| 632 | 
            +
            use_cpu=False,
         | 
| 633 | 
            +
            use_ipex=False,
         | 
| 634 | 
            +
            use_legacy_prediction_loop=False,
         | 
| 635 | 
            +
            use_liger=False,
         | 
| 636 | 
            +
            use_liger_kernel=False,
         | 
| 637 | 
            +
            use_mps_device=False,
         | 
| 638 | 
            +
            wandb_entity=None,
         | 
| 639 | 
            +
            wandb_project=None,
         | 
| 640 | 
            +
            warmup_ratio=0.1,
         | 
| 641 | 
            +
            warmup_steps=0,
         | 
| 642 | 
            +
            weight_decay=0.0,
         | 
| 643 | 
            +
            )
         | 
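This second parameter dump belongs to the later 06:55 run and differs from the 03:28 run mainly in per_device_train_batch_size (8 instead of 16). Both runs use lr_scheduler_type=COSINE_WITH_MIN_LR with min_lr_rate=0.1 and warmup_ratio=0.1, i.e. the learning rate warms up linearly over the first ~10% of steps and then follows a cosine that decays to 10% of the peak 1e-05 rather than to zero. A small sketch of that shape (an approximation, not the exact transformers implementation):

# Hedged sketch of the cosine-with-minimum-LR schedule configured above
# (warmup_ratio=0.1, min_lr_rate=0.1, peak learning_rate=1e-05).
import math

def lr_at_step(step, total_steps, peak_lr=1e-05, warmup_ratio=0.1, min_lr_rate=0.1):
    warmup_steps = int(warmup_ratio * total_steps)
    if step < warmup_steps:
        return peak_lr * step / max(1, warmup_steps)        # linear warmup
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))     # 1 -> 0
    return peak_lr * (min_lr_rate + (1.0 - min_lr_rate) * cosine)  # decays to 10% of peak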
| 644 | 
            +
            2025-07-09 06:55:50 - INFO - __main__ - *** Initializing model kwargs ***
         | 
| 645 | 
            +
            2025-07-09 06:55:50 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
         | 
| 646 | 
            +
            Memory reserved: 0.0
         | 
| 647 | 
            +
            2025-07-09 06:57:14 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625
         | 
| 648 | 
            +
            Memory reserved: 7322.0
         | 
| 649 | 
            +
            2025-07-09 06:57:15 - INFO - __main__ - MoE layers replaced with Dense MLP layers
         | 
| 650 | 
            +
            2025-07-09 06:57:15 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 4836.39697265625
         | 
| 651 | 
            +
            Memory reserved: 6442.0
         | 
| 652 | 
            +
            2025-07-09 06:57:15 - INFO - __main__ - Initializing EfficientDistillationTrainer...
         | 
| 653 | 
            +
            2025-07-09 06:57:36 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375
         | 
| 654 | 
            +
            Memory reserved: 12800.0
         | 
| 655 | 
            +
            2025-07-09 06:57:36 - INFO - __main__ - *** Starting training ***
         | 
| 656 | 
            +
            2025-07-09 06:57:36 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
         | 
| 657 | 
            +
              (model): DeepseekV2Model(
         | 
| 658 | 
            +
                (embed_tokens): Embedding(102400, 2048)
         | 
| 659 | 
            +
                (layers): ModuleList(
         | 
| 660 | 
            +
                  (0): DeepseekV2DecoderLayer(
         | 
| 661 | 
            +
                    (self_attn): DeepseekV2FlashAttention2(
         | 
| 662 | 
            +
                      (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
         | 
| 663 | 
            +
                      (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
         | 
| 664 | 
            +
                      (kv_a_layernorm): DeepseekV2RMSNorm()
         | 
| 665 | 
            +
                      (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
         | 
| 666 | 
            +
                      (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
         | 
| 667 | 
            +
                      (rotary_emb): DeepseekV2YarnRotaryEmbedding()
         | 
| 668 | 
            +
                    )
         | 
| 669 | 
            +
                    (mlp): DeepseekV2MLP(
         | 
| 670 | 
            +
                      (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
         | 
| 671 | 
            +
                      (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
         | 
| 672 | 
            +
                      (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
         | 
| 673 | 
            +
                      (act_fn): SiLU()
         | 
| 674 | 
            +
                    )
         | 
| 675 | 
            +
                    (input_layernorm): DeepseekV2RMSNorm()
         | 
| 676 | 
            +
                    (post_attention_layernorm): DeepseekV2RMSNorm()
         | 
| 677 | 
            +
                  )
         | 
| 678 | 
            +
                  (1-26): 26 x DeepseekV2DecoderLayer(
         | 
| 679 | 
            +
                    (self_attn): DeepseekV2FlashAttention2(
         | 
| 680 | 
            +
                      (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
         | 
| 681 | 
            +
                      (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
         | 
| 682 | 
            +
                      (kv_a_layernorm): DeepseekV2RMSNorm()
         | 
| 683 | 
            +
                      (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
         | 
| 684 | 
            +
                      (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
         | 
| 685 | 
            +
                      (rotary_emb): DeepseekV2YarnRotaryEmbedding()
         | 
| 686 | 
            +
                    )
         | 
| 687 | 
            +
                    (mlp): DeepseekV2MoE(
         | 
| 688 | 
            +
                      (experts): ModuleList(
         | 
| 689 | 
            +
                        (0-63): 64 x DeepseekV2MLP(
         | 
| 690 | 
            +
                          (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
         | 
| 691 | 
            +
                          (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
         | 
| 692 | 
            +
                          (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
         | 
| 693 | 
            +
                          (act_fn): SiLU()
         | 
| 694 | 
            +
                        )
         | 
| 695 | 
            +
                      )
         | 
| 696 | 
            +
                      (gate): MoEGate()
         | 
| 697 | 
            +
                      (shared_experts): DeepseekV2MLP(
         | 
| 698 | 
            +
                        (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
         | 
| 699 | 
            +
                        (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
         | 
| 700 | 
            +
                        (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
         | 
| 701 | 
            +
                        (act_fn): SiLU()
         | 
| 702 | 
            +
                      )
         | 
| 703 | 
            +
                    )
         | 
| 704 | 
            +
                    (input_layernorm): DeepseekV2RMSNorm()
         | 
| 705 | 
            +
                    (post_attention_layernorm): DeepseekV2RMSNorm()
         | 
| 706 | 
            +
                  )
         | 
| 707 | 
            +
                )
         | 
| 708 | 
            +
                (norm): DeepseekV2RMSNorm()
         | 
| 709 | 
            +
              )
         | 
| 710 | 
            +
              (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
         | 
| 711 | 
            +
            )
         | 
    	
        training_args.bin
    ADDED
    
    | @@ -0,0 +1,3 @@ | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            version https://git-lfs.github.com/spec/v1
         | 
| 2 | 
            +
            oid sha256:0cd385fa93f0564f06a7b04c4a90bd07b25153ad0a0b3e3e7ae6335434601c69
         | 
| 3 | 
            +
            size 8184
         |
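training_args.bin is the serialized training-arguments object that the Trainer writes alongside checkpoints, hence the small (~8 KB) LFS object. After downloading, it can be inspected with torch.load; a minimal sketch, assuming a recent PyTorch where weights_only defaults to True:

# Hedged sketch: inspect the serialized training arguments stored in training_args.bin.
# weights_only=False is needed because the file is a pickled arguments object,
# so only load it from a source you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)          # e.g. EfficientDistillationConfig
print(args.learning_rate, args.per_device_train_batch_size)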