minpeter committed on
Commit 5a91cb1 · verified · 1 Parent(s): f9156b3

Upload folder using huggingface_hub
config.json CHANGED
@@ -1,11 +1,10 @@
 {
   "architectures": [
-    "Qwen2MoeForCausalLM"
+    "MixtralForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 31989,
-  "decoder_sparse_step": 1,
   "eos_token_id": 31989,
   "head_dim": 64,
   "hidden_act": "silu",
@@ -13,31 +12,25 @@
   "initializer_range": 0.036084391824351615,
   "intermediate_size": 1920,
   "max_position_embeddings": 8192,
-  "max_window_layers": 28,
   "mlp_bias": false,
-  "mlp_only_layers": [],
-  "model_type": "qwen2_moe",
-  "moe_intermediate_size": 1920,
-  "norm_topk_prob": true,
+  "model_type": "mixtral",
   "num_attention_heads": 12,
-  "num_experts": 3,
   "num_experts_per_tok": 2,
   "num_hidden_layers": 27,
   "num_key_value_heads": 4,
+  "num_local_experts": 4,
   "output_router_logits": false,
   "pad_token_id": 31989,
   "pretraining_tp": 1,
-  "qkv_bias": true,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 1000000.0,
   "router_aux_loss_coef": 0.001,
-  "shared_expert_intermediate_size": 1920,
+  "router_jitter_noise": 0.0,
   "sliding_window": null,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.53.3",
   "use_cache": false,
-  "use_sliding_window": false,
   "vocab_size": 32000
 }
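This change switches the merged checkpoint from the qwen2_moe layout to a plain Mixtral one: model_type becomes "mixtral", the Qwen-specific keys (decoder_sparse_step, qkv_bias, use_sliding_window, shared_expert_intermediate_size, and related settings) are dropped, and the model now declares 4 local experts with 2 experts routed per token. A minimal sanity-check sketch, assuming the converted files sit in a local directory; the path below is a placeholder, not the actual repo name:

import torch
from transformers import AutoConfig, AutoModelForCausalLM

cfg = AutoConfig.from_pretrained("./tiny-ko-moe")  # hypothetical local path
assert cfg.model_type == "mixtral"
assert cfg.architectures == ["MixtralForCausalLM"]
assert cfg.num_local_experts == 4 and cfg.num_experts_per_tok == 2

# Loads through the stock Mixtral implementation; no remote code is required.
model = AutoModelForCausalLM.from_pretrained("./tiny-ko-moe", torch_dtype=torch.bfloat16)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")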
mergekit_moe_config.yml CHANGED
@@ -1,7 +1,6 @@
 base_model: minpeter/tiny-ko-187m-base-250725
 gate_mode: hidden
 dtype: bfloat16
-architecture: qwen
 experts:
   - source_model: minpeter/tiny-ko-187m-base-250725
     positive_prompts:
@@ -15,7 +14,6 @@ experts:
       - "[Genres: Science Fiction]\n[Tags: humor, old school, sci fi]"
       - "> get ye flask"
       - "[Mode: Interactive Storyteller]"
-shared_experts:
   - source_model: minpeter/tiny-ko-187m-base-250725
     positive_prompts:
       - "<|im_start|>user\nWie geht es dir?<|im_end|>"
mixtral-patch.py ADDED
@@ -0,0 +1,187 @@
+# Copyright (C) 2025 Arcee AI
+#
+# This software is free software: you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program. If not, see http://www.gnu.org/licenses/.
+
+import logging
+from typing import List, Optional
+
+import torch
+import tqdm
+import transformers
+
+from mergekit.architecture import MISTRAL_INFO, WeightInfo
+from mergekit.moe.arch import MoEOutputArchitecture
+from mergekit.moe.common import copy_tensor_out, initialize_io, select_dtype
+from mergekit.moe.config import MoEMergeConfig
+from mergekit.options import MergeOptions
+
+
+class MixtralMoE(MoEOutputArchitecture):
+    def name(self) -> str:
+        return "Mixtral"
+
+    def supports_config(
+        self,
+        config: MoEMergeConfig,
+        explain: bool = False,
+        trust_remote_code: bool = False,
+    ) -> bool:
+        if config.shared_experts:
+            if explain:
+                logging.warning("Mixtral does not support shared experts")
+            return False
+
+        model_types = []
+        for model_ref in [config.base_model] + [e.source_model for e in config.experts]:
+            model_cfg = model_ref.config(trust_remote_code=trust_remote_code)
+            model_types.append(model_cfg.model_type)
+
+        if len(set(model_types)) != 1:
+            if explain:
+                logging.warning(
+                    "Mixtral requires all input models to have the same architecture"
+                )
+            return False
+        if model_types[0] not in ("llama", "mistral"):
+            if explain:
+                logging.warning(
+                    "Mixtral requires all input models to be Llama or Mistral models"
+                )
+            return False
+        return True
+
+    def _generate_config(
+        self,
+        base_config: transformers.PretrainedConfig,
+        num_experts: int,
+        shared_experts: Optional[int] = None,
+        experts_per_token: Optional[int] = None,
+    ) -> transformers.PretrainedConfig:
+        if shared_experts:
+            raise NotImplementedError("Shared experts not supported for Mixtral output")
+
+        if not isinstance(base_config, transformers.MistralConfig):
+            base_cfg_mistral = transformers.MistralConfig(**base_config.to_dict())
+            base_cfg_mistral.sliding_window = None
+            base_cfg_mistral.max_position_embeddings = (
+                base_config.max_position_embeddings
+            )
+            base_config = base_cfg_mistral
+
+        out_cfg = transformers.MixtralConfig(**base_config.to_dict())
+        out_cfg.architectures = ["MixtralForCausalLM"]
+        out_cfg.num_local_experts = num_experts
+        out_cfg.num_experts_per_tok = experts_per_token or 2
+        out_cfg.sliding_window = None
+
+        if (out_cfg.num_local_experts & (out_cfg.num_local_experts - 1)) != 0:
+            logging.warning(
+                f"Your model has {out_cfg.num_local_experts} experts, which is "
+                "not a power of two. The model will not be usable in llama.cpp."
+            )
+        return out_cfg
+
+    def _remap_weight_name(self, weight: WeightInfo) -> str:
+        if ".mlp." not in weight.name:
+            # Everything but MLP is identical to base Mistral
+            return weight.name
+
+        res = weight.name
+        for needle, replacement in [
+            (".mlp.gate_proj", ".block_sparse_moe.experts.{expert_idx}.w1"),
+            (".mlp.down_proj", ".block_sparse_moe.experts.{expert_idx}.w2"),
+            (".mlp.up_proj", ".block_sparse_moe.experts.{expert_idx}.w3"),
+        ]:
+            res = res.replace(needle, replacement)
+        return res
+
+    def _router_weight_name(self, layer_idx: int) -> str:
+        return f"model.layers.{layer_idx}.block_sparse_moe.gate.weight"
+
+    def write_model(
+        self,
+        out_path: str,
+        config: MoEMergeConfig,
+        merge_options: MergeOptions,
+        router_weights: List[torch.Tensor],
+        shared_router_weights: Optional[List[torch.Tensor]] = None,
+    ):
+        base_model = config.base_model
+        base_cfg = base_model.config(trust_remote_code=merge_options.trust_remote_code)
+
+        assert len(router_weights) == base_cfg.num_hidden_layers, (
+            f"Expected {base_cfg.num_hidden_layers} router weights, "
+            f"got {len(router_weights)}"
+        )
+
+        out_dtype = select_dtype(config, base_cfg)
+        out_cfg = self._generate_config(
+            base_cfg,
+            len(config.experts),
+            len(config.shared_experts or []),
+            config.experts_per_token,
+        )
+        out_cfg.torch_dtype = out_dtype
+        out_cfg.save_pretrained(out_path)
+
+        loaders, base_loader, writer = initialize_io(config, out_path, merge_options)
+        for weight_info in tqdm.tqdm(
+            MISTRAL_INFO.all_weights(base_cfg),
+            desc="Weights",
+        ):
+            tensor_name = self._remap_weight_name(weight_info)
+            if "{expert_idx}" in tensor_name:
+                for expert_index, expert in enumerate(config.experts):
+                    expert_name = tensor_name.replace("{expert_idx}", str(expert_index))
+                    expert_loader = loaders.get(expert.source_model)
+                    copy_tensor_out(
+                        weight_info,
+                        expert_loader,
+                        writer,
+                        expert=expert,
+                        out_dtype=out_dtype,
+                        output_name=expert_name,
+                        clone=merge_options.clone_tensors,
+                        is_residual="down_proj" in tensor_name,
+                    )
+            else:
+                # START FINAL PATCH
+                # Because WeightInfo is a frozen Pydantic model, we cannot modify it.
+                # We must manually load and save the tensor for the tied weights case.
+                if (
+                    weight_info.name == "lm_head.weight"
+                    and base_cfg.tie_word_embeddings
+                ):
+                    # If tie_word_embeddings is used, lm_head.weight should not be copied.
+                    pass
+
+                else:
+                    tensor = base_loader.get_tensor(weight_info.name)
+                    writer.save_tensor(
+                        weight_info.name,  # Always save with the correct destination name
+                        tensor.to(dtype=out_dtype),
+                        clone=merge_options.clone_tensors,
+                    )
+                # END FINAL PATCH
+
+        for layer_idx, weight in enumerate(
+            tqdm.tqdm(router_weights, desc="Router weights")
+        ):
+            writer.save_tensor(
+                self._router_weight_name(layer_idx),
+                weight.to(dtype=out_dtype).contiguous(),
+                clone=merge_options.clone_tensors,
+            )
+
+        writer.finalize()
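The core of the conversion is the dense-to-sparse renaming in _remap_weight_name: every expert receives a copy of the base model's gate_proj / down_proj / up_proj tensors under Mixtral's block_sparse_moe.experts.{i}.w1 / .w2 / .w3 names, the per-layer router tensors are written as block_sparse_moe.gate.weight, and the patched branch at the end skips lm_head.weight whenever tie_word_embeddings is set so the tied embedding is not duplicated in the output shard. A self-contained sketch of the renaming rule, re-implemented outside mergekit purely for illustration:

# Standalone illustration of the tensor-name remapping used by MixtralMoE above.
REMAP = [
    (".mlp.gate_proj", ".block_sparse_moe.experts.{expert_idx}.w1"),
    (".mlp.down_proj", ".block_sparse_moe.experts.{expert_idx}.w2"),
    (".mlp.up_proj", ".block_sparse_moe.experts.{expert_idx}.w3"),
]

def remap(name: str, expert_idx: int) -> str:
    # Non-MLP tensors keep their Mistral names; MLP tensors fan out per expert.
    if ".mlp." not in name:
        return name
    for needle, replacement in REMAP:
        name = name.replace(needle, replacement)
    return name.format(expert_idx=expert_idx)

for i in range(4):  # num_local_experts = 4 in the config above
    print(remap("model.layers.0.mlp.up_proj.weight", i))
# model.layers.0.block_sparse_moe.experts.0.w3.weight ... experts.3.w3.weight

These are exactly the tensor names that appear in the rewritten model.safetensors.index.json below.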
model-00001-of-00001.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c6f18b44ce328ef35438c1da95654bf1fe7dbb0982377d80d7c516d9ba0dd7a
-size 1089993904
+oid sha256:1265c116b469ee3032ca93a959eb6e608c864e094e905c3533e1e89c4770e2f4
+size 1089915064
model.safetensors.index.json CHANGED
@@ -1 +1 @@
- {"metadata": {"mergekit_version": "0.0.6", "total_size": 1089921024}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00001.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", 
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.4.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.input_layernorm.weight": "model-00001-of-00001.safetensors", 
"model.layers.6.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.8.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.10.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.2.down_proj.weight": 
"model-00001-of-00001.safetensors", "model.layers.12.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.14.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.shared_expert.down_proj.weight": 
"model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.18.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.0.gate_proj.weight": 
"model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.22.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.1.gate_proj.weight": 
"model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.0.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.1.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.2.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.0.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.1.gate_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.26.mlp.experts.2.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.0.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.1.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.experts.2.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.norm.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.0.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.1.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.2.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.3.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.4.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.5.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.6.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.7.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.8.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.9.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.10.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.11.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.12.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.13.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.14.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", 
"model.layers.15.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.15.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.16.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.17.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.18.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.19.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.20.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.21.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.22.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.23.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.24.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.25.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.gate.weight": "model-00001-of-00001.safetensors", "model.layers.26.mlp.shared_expert_gate.weight": "model-00001-of-00001.safetensors"}}
 
1
+ {"metadata": {"mergekit_version": "0.0.6", "total_size": 1089851904}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00001.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.4.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.1.w3.weight": 
"model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.5.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.6.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", 
"model.layers.6.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.7.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.8.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.2.w2.weight": 
"model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.9.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", 
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", 
"model.layers.13.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", 
"model.layers.15.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", 
"model.layers.17.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00001.safetensors", 
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.22.post_attention_layernorm.weight": 
"model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.24.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.3.w3.weight": 
"model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.25.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.layers.26.input_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00001.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.3.w1.weight": 
"model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00001.safetensors", "model.norm.weight": "model-00001-of-00001.safetensors", "model.layers.0.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.1.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.2.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.3.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.4.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.5.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.6.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.7.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.8.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.9.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.10.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.11.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.12.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.13.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.14.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.15.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.16.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.17.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.18.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.19.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.20.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.21.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.22.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.23.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.24.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.25.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors", "model.layers.26.block_sparse_moe.gate.weight": "model-00001-of-00001.safetensors"}}