lsw825 committed (verified)
Commit e953727 · Parent: 88483fa

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.

Files changed (50)
  1. .gitattributes +1 -0
  2. config.json +21 -13
  3. configuration_deepseek.py +212 -0
  4. generation_config.json +4 -0
  5. latest_checkpointed_iteration.txt +1 -0
  6. model-1-of-61.safetensors +3 -0
  7. model-1-of-63.safetensors +3 -0
  8. model-10-of-61.safetensors +3 -0
  9. model-10-of-63.safetensors +3 -0
  10. model-11-of-61.safetensors +3 -0
  11. model-11-of-63.safetensors +3 -0
  12. model-12-of-61.safetensors +3 -0
  13. model-12-of-63.safetensors +3 -0
  14. model-13-of-61.safetensors +3 -0
  15. model-13-of-63.safetensors +3 -0
  16. model-14-of-61.safetensors +3 -0
  17. model-14-of-63.safetensors +3 -0
  18. model-15-of-61.safetensors +3 -0
  19. model-15-of-63.safetensors +3 -0
  20. model-16-of-61.safetensors +3 -0
  21. model-16-of-63.safetensors +3 -0
  22. model-17-of-61.safetensors +3 -0
  23. model-17-of-63.safetensors +3 -0
  24. model-18-of-61.safetensors +3 -0
  25. model-18-of-63.safetensors +3 -0
  26. model-19-of-61.safetensors +3 -0
  27. model-19-of-63.safetensors +3 -0
  28. model-2-of-61.safetensors +3 -0
  29. model-2-of-63.safetensors +3 -0
  30. model-20-of-61.safetensors +3 -0
  31. model-20-of-63.safetensors +3 -0
  32. model-21-of-61.safetensors +3 -0
  33. model-21-of-63.safetensors +3 -0
  34. model-22-of-61.safetensors +3 -0
  35. model-22-of-63.safetensors +3 -0
  36. model-23-of-61.safetensors +3 -0
  37. model-23-of-63.safetensors +3 -0
  38. model-24-of-61.safetensors +3 -0
  39. model-24-of-63.safetensors +3 -0
  40. model-25-of-61.safetensors +3 -0
  41. model-25-of-63.safetensors +3 -0
  42. model-26-of-61.safetensors +3 -0
  43. model-26-of-63.safetensors +3 -0
  44. model-27-of-61.safetensors +3 -0
  45. model-27-of-63.safetensors +3 -0
  46. model-28-of-61.safetensors +3 -0
  47. model-28-of-63.safetensors +3 -0
  48. model-29-of-61.safetensors +3 -0
  49. model-29-of-63.safetensors +3 -0
  50. model-3-of-61.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
config.json CHANGED
@@ -12,27 +12,27 @@
  "aux_loss_alpha": 0.001,
  "bos_token_id": 163584,
  "eos_token_id": 163585,
- "first_k_dense_replace": 0,
+ "first_k_dense_replace": 1,
  "hidden_act": "silu",
- "hidden_size": 1408,
+ "hidden_size": 7168,
  "initializer_range": 0.02,
- "intermediate_size": 9216,
+ "intermediate_size": 18432,
  "kv_lora_rank": 512,
- "max_position_embeddings": 163840,
+ "max_position_embeddings": 131072,
  "model_type": "kimi_k2",
- "moe_intermediate_size": 640,
+ "moe_intermediate_size": 2048,
  "moe_layer_freq": 1,
  "n_group": 1,
- "n_routed_experts": 64,
+ "n_routed_experts": 384,
  "n_shared_experts": 1,
  "norm_topk_prob": true,
- "num_attention_heads": 18,
+ "num_attention_heads": 64,
  "num_experts_per_tok": 8,
- "num_hidden_layers": 18,
- "num_key_value_heads": 18,
+ "num_hidden_layers": 61,
+ "num_key_value_heads": 64,
  "num_nextn_predict_layers": 0,
  "pretraining_tp": 1,
- "q_lora_rank": null,
+ "q_lora_rank": 1536,
  "qk_nope_head_dim": 128,
  "qk_rope_head_dim": 64,
  "quantization_config": {
@@ -46,7 +46,16 @@
  },
  "rms_norm_eps": 1e-06,
  "rope_theta": 50000.0,
- "routed_scaling_factor": 2.446,
+ "routed_scaling_factor": 2.827,
+ "rope_scaling": {
+   "beta_fast": 1.0,
+   "beta_slow": 1.0,
+   "factor": 32.0,
+   "mscale": 1.0,
+   "mscale_all_dim": 1.0,
+   "original_max_position_embeddings": 4096,
+   "type": "yarn"
+ },
  "scoring_func": "sigmoid",
  "seq_aux": true,
  "tie_word_embeddings": false,
@@ -56,6 +65,5 @@
  "transformers_version": "4.48.3",
  "use_cache": true,
  "v_head_dim": 128,
- "vocab_size": 163840,
- "rope_scaling": null
+ "vocab_size": 163840
  }
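
Net effect of this config.json change: the placeholder geometry (18 layers, hidden size 1408, 64 routed experts) is replaced by the full-size geometry (61 layers, hidden size 7168, 384 routed experts, MLA with q_lora_rank 1536), and a YaRN rope_scaling block is added whose 32x factor over the 4096-token original window gives 32 × 4096 = 131072, matching the new max_position_embeddings. A minimal sanity-check sketch (assuming config.json is read from the checkout directory):

    import json

    # Load the config.json written by this commit (path is an assumption).
    with open("config.json") as f:
        cfg = json.load(f)

    # Full-size geometry from the diff above.
    assert cfg["num_hidden_layers"] == 61 and cfg["hidden_size"] == 7168
    assert cfg["n_routed_experts"] == 384 and cfg["num_experts_per_tok"] == 8

    # YaRN: a 32x extension of the 4096-token pretraining window.
    rs = cfg["rope_scaling"]
    assert rs["type"] == "yarn"
    assert rs["factor"] * rs["original_max_position_embeddings"] == cfg["max_position_embeddings"] == 131072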
configuration_deepseek.py ADDED
@@ -0,0 +1,212 @@
+ # Copy from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+ class DeepseekV3Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-V3.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 129280):
+             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`DeepseekV3Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1407):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+             Number of next-n predict layers in the DeepSeekV3 Model.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         n_shared_experts (`int`, *optional*, defaults to None):
+             Number of shared experts; None means dense model.
+         n_routed_experts (`int`, *optional*, defaults to None):
+             Number of routed experts; None means dense model.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+             Scaling factor for routed experts.
+         topk_method (`str`, *optional*, defaults to `greedy`):
+             Top-k method used in the routed gate.
+         n_group (`int`, *optional*, defaults to None):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to None):
+             Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
+         num_experts_per_tok (`int`, *optional*, defaults to None):
+             Number of selected experts; None means dense model.
+         moe_layer_freq (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+         first_k_dense_replace (`int`, *optional*, defaults to 0):
+             Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
+                                                              \--k dense layers--/
+         norm_topk_prob (`bool`, *optional*, defaults to False):
+             Whether to normalize the weights of the routed experts.
+         scoring_func (`str`, *optional*, defaults to 'softmax'):
+             Method of computing expert weights.
+         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+             Auxiliary loss weight coefficient.
+         seq_aux (`bool`, *optional*, defaults to True):
+             Whether to compute the auxiliary loss for each individual sample.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the word embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+     >>> # Initializing a Deepseek-V3 style configuration
+     >>> configuration = DeepseekV3Config()
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "deepseek_v3"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=129280,
+         hidden_size=7168,
+         intermediate_size=18432,
+         moe_intermediate_size=2048,
+         num_hidden_layers=61,
+         num_nextn_predict_layers=1,
+         num_attention_heads=128,
+         num_key_value_heads=128,
+         n_shared_experts=1,
+         n_routed_experts=256,
+         ep_size=1,
+         routed_scaling_factor=2.5,
+         kv_lora_rank=512,
+         q_lora_rank=1536,
+         qk_rope_head_dim=64,
+         v_head_dim=128,
+         qk_nope_head_dim=128,
+         topk_method='noaux_tc',
+         n_group=8,
+         topk_group=4,
+         num_experts_per_tok=8,
+         moe_layer_freq=1,
+         first_k_dense_replace=3,
+         norm_topk_prob=True,
+         scoring_func='sigmoid',
+         aux_loss_alpha=0.001,
+         seq_aux=True,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=0,
+         eos_token_id=1,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_nextn_predict_layers = num_nextn_predict_layers
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.ep_size = ep_size
+         self.routed_scaling_factor = routed_scaling_factor
+         self.kv_lora_rank = kv_lora_rank
+         self.q_lora_rank = q_lora_rank
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.v_head_dim = v_head_dim
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.topk_method = topk_method
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.num_experts_per_tok = num_experts_per_tok
+         self.moe_layer_freq = moe_layer_freq
+         self.first_k_dense_replace = first_k_dense_replace
+         self.norm_topk_prob = norm_topk_prob
+         self.scoring_func = scoring_func
+         self.aux_loss_alpha = aux_loss_alpha
+         self.seq_aux = seq_aux
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
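
The docstring example above only exercises the defaults, which describe DeepSeek-V3 itself. A hedged sketch of instantiating the bundled class with this checkpoint's values (every override below is taken from the config.json diff in this commit):

    # Sketch: build the bundled config with this commit's config.json values.
    from configuration_deepseek import DeepseekV3Config

    config = DeepseekV3Config(
        vocab_size=163840,
        hidden_size=7168,
        intermediate_size=18432,
        moe_intermediate_size=2048,
        num_hidden_layers=61,
        num_attention_heads=64,
        num_key_value_heads=64,
        n_routed_experts=384,
        num_experts_per_tok=8,
        first_k_dense_replace=1,
        n_group=1,
        routed_scaling_factor=2.827,
        rope_theta=50000.0,
        max_position_embeddings=131072,
    )
    print(config.num_key_value_heads)  # 64: equal to num_attention_heads, i.e. MHA-style

Note that config.json declares model_type "kimi_k2" while this class declares "deepseek_v3"; loading through AutoConfig therefore depends on how the repo maps the custom type, which this diff does not show.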
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_length": 131072,
+   "eos_token_id": 163585
+ }
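
The new generation_config.json caps generation at the full 131072-token YaRN window and sets the non-default EOS id from config.json. A small sketch (assuming the checkout directory is the working directory) of reading it with transformers:

    # Sketch: load the committed generation defaults.
    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained(".")  # directory containing generation_config.json
    assert gen_cfg.max_length == 131072
    assert gen_cfg.eos_token_id == 163585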
latest_checkpointed_iteration.txt ADDED
@@ -0,0 +1 @@
+ 517
model-1-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6588a247fcf5538b0fdcfeea096893e8b7dd9c5b060f732c64a9c8e4513642d9
+ size 2846451040
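
Each model-*.safetensors entry in this diff is a three-line Git LFS pointer (spec version, sha256 oid, byte size) rather than the weight shard itself; git-lfs swaps in the real blob on checkout. A hypothetical helper (not part of the repo) for verifying a downloaded shard against its pointer:

    import hashlib

    def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
        """Return True if the blob's sha256 and size match the LFS pointer."""
        # Pointer format: "version <url>" / "oid sha256:<hex>" / "size <bytes>"
        with open(pointer_path) as f:
            fields = dict(line.split(" ", 1) for line in f.read().splitlines() if line)
        expected_oid = fields["oid"].split(":", 1)[1]
        expected_size = int(fields["size"])

        sha, size = hashlib.sha256(), 0
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                sha.update(chunk)
                size += len(chunk)
        return sha.hexdigest() == expected_oid and size == expected_size

    # e.g. verify_lfs_pointer("model-1-of-61.pointer", "model-1-of-61.safetensors")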
model-1-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b66929949e38022253b91face3fc5aa06b157d6c546bc464013ed268e561353f
+ size 497640784
model-10-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65a1acc308054a60d93c09064542a8234d077ed404da111e23e00a7dd477311d
+ size 17066593104
model-10-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7088a10a61ce403feae24d019062427e13009c0fb030a719940865228e2a408
+ size 17066593248
model-11-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d7bb19290aa16be216ed59d616d1ef8c837dfc31e73ea81860ae0c17b6bc066
+ size 17066595432
model-11-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a2679ca189fbf496682b086b918caa497ce4d0485c385efa7816d61ef12f9a1
+ size 17066595576
model-12-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b6c4432d6562db0c22405ecfba1d9b5e8c7e52f63a53f223f379d516201678c
+ size 17066595432
model-12-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe000201a5a7d7e9a46d9a17123991b84e72b14e64a80e8e78a646c5396ee2cd
+ size 17066595576
model-13-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a53278f0777055cf4cf3f5e9daad803cf6895dd126f544880d3093c3c25be2ec
+ size 17066595432
model-13-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8a15b9848c8d28eaa45726ccbd6b61ea6a050795f84c8ace58d6665691d4d3f
+ size 17066595576
model-14-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:260ce11bc3e2ff1ab14a610ef3eb77537c1f38ac3674c4e3921f19495af7ac58
+ size 17066595432
model-14-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:595d0048b97e3e537b6de012da09c8decd2a52868107c725ff59cf61d9b96adb
+ size 17066595576
model-15-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f032c15758f09958ebac92eda24a68bd029e8db3e9f877864b36611d76187ea6
+ size 17066595432
model-15-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec32f06f8befe4bec244a1639fba8f6d36d0caa0466b3b9acd1dd76d1db4b1b2
+ size 17066595576
model-16-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a277ec201afc53edda1c109cae0f927a6c5ed25a5c8efb30556ed3f1c43c478
+ size 17066595432
model-16-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:557b2132770c324eae73e47f1beeac95962358ec11c81d1d49045fa31f7ef3b9
+ size 17066595576
model-17-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5b8f09959653888b434fbd7ecb0e4abb11f8be7fa222386733a57b567e69d4c
+ size 17066595432
model-17-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43cd060ebe95fb6b42b4f99dc3880a37f33024cf8ab257ef5cd3a04c05c33da6
+ size 17066595576
model-18-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f1633da1551f0f6cd8948ef8e43be92fe94f660876570d8ba1ebf06320439f4
+ size 17066595432
model-18-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4eb03d5693efa319dea1261aa0670252614154ae9a60b9b2027b0a9e8af989e2
+ size 17066595576
model-19-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37f0a7d395f0e8e9cbe3150a315a9f199222c19feae870f002610856c605a12a
+ size 17066595432
model-19-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:144e1a30a9f46ebe0e33d9edf39a19db4be82b2664b87f5dada02003cf42624a
+ size 17066595576
model-2-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78b30915b84c270ce710df51981156d8b3f75a091df21238fcc7815c3ec88a61
+ size 17066593104
model-2-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2055a533cf23eee917a58846f68575860560058a3f07e17b5cb181d598b83c98
+ size 17066593248
model-20-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:847d034c34f5c9e4ad173b8808b43eb771b9359b20c286d49d892d621e88608c
+ size 17066595432
model-20-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eae77b383a177fd9f34d939e2fd9e26ee7856055cbf15871df0e099dfdedbbb0
+ size 17066595576
model-21-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2e432244d1dfbe7e6ad290ddd0f0fc3b499978342b691e6545db6f6c1984a04
+ size 17066595432
model-21-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ea7434751a77c5ed9b4edb23e9cdbf8b146d8db56fe0934197522dfe7bf7387
+ size 17066595576
model-22-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4212dcf0872a741ac89c1df73d26278a85393b85649029608a47ffb689bfbff
+ size 17066595432
model-22-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bd21a14846bbddebc1accc6757cd5190320249e7d1217e77bc5d91b44e09e4e
+ size 17066595576
model-23-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ba7127d0d6b8a4846eedf2d79af2f99ec017ed4ab73d9050f90789cce927418
+ size 17066595432
model-23-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6f86ff3f901e65e6190638bdd53f4854e394e2d0d3bb9da3cd97734ae060bf2
+ size 17066595576
model-24-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47dda5474725ca5ec02aa6ccfbfd742319761ed6be04151aa23efb7498b46a17
+ size 17066595432
model-24-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57865459c0a6dec1a78b7f7bfd297de8ddaa355bc85a711a69dad1790166a550
+ size 17066595576
model-25-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8043e47252b37cbded4997128fd82501ebe13b55ce1b0156e8f0dc82934000c0
+ size 17066595432
model-25-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c4c026fbf85f13ef613374431d2caeea95b2b4f210113ec9ef3fda4e635d9c2
+ size 17066595576
model-26-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a7c266b4f2c2fb89d08148c0523d5ba59971ae5806587943f217064ee87dc1b
+ size 17066595432
model-26-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cba8714628aaa8980b633aaeaa9189358aa64d165539d1e211ac645fb1eed508
+ size 17066595576
model-27-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4959b33787160976972c53e5b4849170e1d9bd194bab1b13f8542c21defbef93
+ size 17066595432
model-27-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b745fab718f3e6c097294ac7ad9811db4c00b33cf5a1a76449a9257d6a92672
+ size 17066595576
model-28-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4110af78af032ce3ebe8baae28f20dbdf455092cf35220a34cecc213d5ca0adf
+ size 17066595432
model-28-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17b4ed412797c87a1419350be495118590483a225f0e26ae50136d711bc709c6
+ size 17066595576
model-29-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d261aaa72fc71696b20af14c5ab1d36263532030f91d20c5951c33b5f0819147
+ size 17066595432
model-29-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05d8a04aef1e2e1a2c56e15b5314fc28bc19c7e6d7fe5e33baf37ad2664d5d3a
+ size 17066595576
model-3-of-61.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddcd18f3ec7ebf67ae168a6a0ba4cd1eb35f836c8f11bdfac2a1aa62ad45c590
+ size 17066593104