diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +tokenizer.json filter=lfs diff=lfs merge=lfs -text diff --git a/chat_template.jinja b/chat_template.jinja new file mode 100644 index 0000000000000000000000000000000000000000..8762a2669af9875fde0a424b7036dfecea14d0ee --- /dev/null +++ b/chat_template.jinja @@ -0,0 +1,397 @@ +{#- + In addition to the normal inputs of `messages` and `tools`, this template also accepts the + following kwargs: + - "builtin_tools": A list, can contain "browser" and/or "python". + - "model_identity": A string that optionally describes the model identity. + - "reasoning_effort": A string that describes the reasoning effort, defaults to "medium". + #} + +{#- Tool Definition Rendering ============================================== #} +{%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%} + {%- if param_spec.type == "array" -%} + {%- if param_spec['items'] -%} + {%- if param_spec['items']['type'] == "string" -%} + {{- "string[]" }} + {%- elif param_spec['items']['type'] == "number" -%} + {{- "number[]" }} + {%- elif param_spec['items']['type'] == "integer" -%} + {{- "number[]" }} + {%- elif param_spec['items']['type'] == "boolean" -%} + {{- "boolean[]" }} + {%- else -%} + {%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%} + {%- if inner_type == "object | object" or inner_type|length > 50 -%} + {{- "any[]" }} + {%- else -%} + {{- inner_type + "[]" }} + {%- endif -%} + {%- endif -%} + {%- if param_spec.nullable -%} + {{- " | null" }} + {%- endif -%} + {%- else -%} + {{- "any[]" }} + {%- if param_spec.nullable -%} + {{- " | null" }} + {%- endif -%} + {%- endif -%} + {%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%} + {#- Handle array of types like ["object", "object"] from Union[dict, list] #} + {%- if param_spec.type | length > 1 -%} + {{- param_spec.type | join(" | ") }} + {%- else -%} + {{- param_spec.type[0] }} + {%- endif -%} + {%- elif param_spec.oneOf -%} + {#- Handle oneOf schemas - check for complex unions and fallback to any #} + {%- set has_object_variants = false -%} + {%- for variant in param_spec.oneOf -%} + {%- if variant.type == "object" -%} + {%- set has_object_variants = true -%} + {%- endif -%} + {%- endfor -%} + {%- if has_object_variants and param_spec.oneOf|length > 1 -%} + {{- "any" }} + {%- else -%} + {%- for variant in param_spec.oneOf -%} + {{- render_typescript_type(variant, required_params) -}} + {%- if variant.description %} + {{- "// " + variant.description }} + {%- endif -%} + {%- if variant.default is defined %} + {{ "// default: " + variant.default|tojson }} + {%- endif -%} + {%- if not loop.last %} + {{- " | " }} + {% endif -%} + {%- endfor -%} + {%- endif -%} + {%- elif param_spec.type == "string" -%} + {%- if param_spec.enum -%} + {{- '"' + param_spec.enum|join('" | "') + '"' -}} + {%- else -%} + {{- "string" }} + {%- if param_spec.nullable %} + {{- " | null" }} + {%- endif -%} + {%- endif -%} + {%- elif param_spec.type == "number" -%} + {{- "number" }} + {%- elif param_spec.type == "integer" -%} + {{- "number" }} + 
{%- elif param_spec.type == "boolean" -%} + {{- "boolean" }} + + {%- elif param_spec.type == "object" -%} + {%- if param_spec.properties -%} + {{- "{ +" }} + {%- for prop_name, prop_spec in param_spec.properties.items() -%} + {{- prop_name -}} + {%- if prop_name not in (param_spec.required or []) -%} + {{- "?" }} + {%- endif -%} + {{- ": " }} + {{ render_typescript_type(prop_spec, param_spec.required or []) }} + {%- if not loop.last -%} + {{-", " }} + {%- endif -%} + {%- endfor -%} + {{- "}" }} + {%- else -%} + {{- "object" }} + {%- endif -%} + {%- else -%} + {{- "any" }} + {%- endif -%} +{%- endmacro -%} + +{%- macro render_tool_namespace(namespace_name, tools) -%} + {{- "## " + namespace_name + " + +" }} + {{- "namespace " + namespace_name + " { + +" }} + {%- for tool in tools %} + {%- set tool = tool.function %} + {{- "// " + tool.description + " +" }} + {{- "type "+ tool.name + " = " }} + {%- if tool.parameters and tool.parameters.properties %} + {{- "(_: { +" }} + {%- for param_name, param_spec in tool.parameters.properties.items() %} + {%- if param_spec.description %} + {{- "// " + param_spec.description + " +" }} + {%- endif %} + {{- param_name }} + {%- if param_name not in (tool.parameters.required or []) -%} + {{- "?" }} + {%- endif -%} + {{- ": " }} + {{- render_typescript_type(param_spec, tool.parameters.required or []) }} + {%- if param_spec.default is defined -%} + {%- if param_spec.enum %} + {{- ", // default: " + param_spec.default }} + {%- elif param_spec.oneOf %} + {{- "// default: " + param_spec.default }} + {%- else %} + {{- ", // default: " + param_spec.default|tojson }} + {%- endif -%} + {%- endif -%} + {%- if not loop.last %} + {{- ", +" }} + {%- else %} + {{- " +" }} + {%- endif -%} + {%- endfor %} + {{- "}) => any; + +" }} + {%- else -%} + {{- "() => any; + +" }} + {%- endif -%} + {%- endfor %} + {{- "} // namespace " + namespace_name }} +{%- endmacro -%} + +{%- macro render_builtin_tools(browser_tool, python_tool) -%} + {%- if browser_tool %} + {{- "## browser + +" }} + {{- "// Tool for browsing. +" }} + {{- "// The `cursor` appears in brackets before each browsing display: `[{cursor}]`. +" }} + {{- "// Cite information from the tool using the following format: +" }} + {{- "// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`. +" }} + {{- "// Do not quote more than 10 words directly from the tool output. +" }} + {{- "// sources=web (default: web) +" }} + {{- "namespace browser { + +" }} + {{- "// Searches for information related to `query` and displays `topn` results. +" }} + {{- "type search = (_: { +" }} + {{- "query: string, +" }} + {{- "topn?: number, // default: 10 +" }} + {{- "source?: string, +" }} + {{- "}) => any; + +" }} + {{- "// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines. +" }} + {{- "// Valid link ids are displayed with the formatting: `【{id}†.*】`. +" }} + {{- "// If `cursor` is not provided, the most recent page is implied. +" }} + {{- "// If `id` is a string, it is treated as a fully qualified URL associated with `source`. +" }} + {{- "// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available. +" }} + {{- "// Use this function without `id` to scroll to a new location of an opened page. 
+" }} + {{- "type open = (_: { +" }} + {{- "id?: number | string, // default: -1 +" }} + {{- "cursor?: number, // default: -1 +" }} + {{- "loc?: number, // default: -1 +" }} + {{- "num_lines?: number, // default: -1 +" }} + {{- "view_source?: boolean, // default: false +" }} + {{- "source?: string, +" }} + {{- "}) => any; + +" }} + {{- "// Finds exact matches of `pattern` in the current page, or the page given by `cursor`. +" }} + {{- "type find = (_: { +" }} + {{- "pattern: string, +" }} + {{- "cursor?: number, // default: -1 +" }} + {{- "}) => any; + +" }} + {{- "} // namespace browser + +" }} + {%- endif -%} + + {%- if python_tool %} + {{- "## python + +" }} + {{- "Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files). + +" }} + {{- "When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster. + +" }} + {%- endif -%} +{%- endmacro -%} + +{#- System Message Construction ============================================ #} +{%- macro build_system_message() -%} + {%- if model_identity is not defined %} + {%- set model_identity = "You are ChatGPT, a large language model trained by OpenAI." %} + {%- endif %} + {{- model_identity + " +" }} + {{- "Knowledge cutoff: 2024-06 +" }} + {{- "Current date: " + strftime_now("%Y-%m-%d") + " + +" }} + {%- if reasoning_effort is not defined %} + {%- set reasoning_effort = "medium" %} + {%- endif %} + {{- "Reasoning: " + reasoning_effort + " + +" }} + {%- if builtin_tools %} + {{- "# Tools + +" }} + {%- set available_builtin_tools = namespace(browser=false, python=false) %} + {%- for tool in builtin_tools %} + {%- if tool == "browser" %} + {%- set available_builtin_tools.browser = true %} + {%- elif tool == "python" %} + {%- set available_builtin_tools.python = true %} + {%- endif %} + {%- endfor %} + {{- render_builtin_tools(available_builtin_tools.browser, available_builtin_tools.python) }} + {%- endif -%} + {{- "# Valid channels: analysis, commentary, final. Channel must be included for every message." }} + {%- if tools -%} + {{- " +Calls to these tools must go to the commentary channel: 'functions'." 
}} + {%- endif -%} +{%- endmacro -%} + +{#- Main Template Logic ================================================= #} +{#- Set defaults #} + +{#- Render system message #} +{{- "<|start|>system<|message|>" }} +{{- build_system_message() }} +{{- "<|end|>" }} + +{#- Extract developer message #} +{%- if messages[0].role == "developer" or messages[0].role == "system" %} + {%- set developer_message = messages[0].content %} + {%- set loop_messages = messages[1:] %} +{%- else %} + {%- set developer_message = "" %} + {%- set loop_messages = messages %} +{%- endif %} + +{#- Render developer message #} +{%- if developer_message or tools %} + {{- "<|start|>developer<|message|>" }} + {%- if developer_message %} + {{- "# Instructions + +" }} + {{- developer_message }} + {%- endif %} + {%- if tools -%} + {{- " + +" }} + {{- "# Tools + +" }} + {{- render_tool_namespace("functions", tools) }} + {%- endif -%} + {{- "<|end|>" }} +{%- endif %} + +{#- Render messages #} +{%- set last_tool_call = namespace(name=none) %} +{%- for message in loop_messages -%} + {#- At this point only assistant/user/tool messages should remain #} + {%- if message.role == 'assistant' -%} + {#- Checks to ensure the messages are being passed in the format we expect #} + {%- if "content" in message %} + {%- if "<|channel|>analysis<|message|>" in message.content or "<|channel|>final<|message|>" in message.content %} + {{- raise_exception("You have passed a message containing <|channel|> tags in the content field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }} + {%- endif %} + {%- endif %} + {%- if "thinking" in message %} + {%- if "<|channel|>analysis<|message|>" in message.thinking or "<|channel|>final<|message|>" in message.thinking %} + {{- raise_exception("You have passed a message containing <|channel|> tags in the thinking field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }} + {%- endif %} + {%- endif %} + {%- if "tool_calls" in message %} + {#- We assume max 1 tool call per message, and so we infer the tool call name #} + {#- in "tool" messages from the most recent assistant tool call name #} + {%- set tool_call = message.tool_calls[0] %} + {%- if tool_call.function %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {%- if message.content and message.thinking %} + {{- raise_exception("Cannot pass both content and thinking in an assistant message with tool calls! Put the analysis message in one or the other, but not both.") }} + {%- elif message.content %} + {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.content + "<|end|>" }} + {%- elif message.thinking %} + {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }} + {%- endif %} + {{- "<|start|>assistant to=" }} + {{- "functions." 
+ tool_call.name + "<|channel|>commentary " }} + {{- (tool_call.content_type if tool_call.content_type is defined else "json") + "<|message|>" }} + {{- tool_call.arguments|tojson }} + {{- "<|call|>" }} + {%- set last_tool_call.name = tool_call.name %} + {%- elif loop.last and not add_generation_prompt %} + {#- Only render the CoT if the final turn is an assistant turn and add_generation_prompt is false #} + {#- This is a situation that should only occur in training, never in inference. #} + {%- if "thinking" in message %} + {{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }} + {%- endif %} + {#- <|return|> indicates the end of generation, but <|end|> does not #} + {#- <|return|> should never be an input to the model, but we include it as the final token #} + {#- when training, so the model learns to emit it. #} + {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|return|>" }} + {%- else %} + {#- CoT is dropped during all previous turns, so we never render it for inference #} + {{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|end|>" }} + {%- set last_tool_call.name = none %} + {%- endif %} + {%- elif message.role == 'tool' -%} + {%- if last_tool_call.name is none %} + {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }} + {%- endif %} + {{- "<|start|>functions." + last_tool_call.name }} + {{- " to=assistant<|channel|>commentary<|message|>" + message.content|tojson + "<|end|>" }} + {%- elif message.role == 'user' -%} + {{- "<|start|>user<|message|>" + message.content + "<|end|>" }} + {%- endif -%} +{%- endfor -%} + +{#- Generation prompt #} +{%- if add_generation_prompt -%} +<|start|>assistant +{%- endif -%} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f50da59dc4e07568a206806352a1385d17f95ca5 --- /dev/null +++ b/config.json @@ -0,0 +1,95 @@ +{ + "architectures": [ + "GptOssForCausalLM" + ], + "attention_bias": true, + "attention_dropout": 0.0, + "eos_token_id": 200002, + "experts_per_token": 4, + "head_dim": 64, + "hidden_act": "silu", + "hidden_size": 2880, + "initial_context_length": 4096, + "initializer_range": 0.02, + "intermediate_size": 2880, + "layer_types": [ + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "full_attention" + ], + "max_position_embeddings": 131072, + "model_type": "gpt_oss", + "num_attention_heads": 64, + "num_experts_per_tok": 4, + "num_hidden_layers": 36, + "num_key_value_heads": 8, + "num_local_experts": 128, + "output_router_logits": false, + "pad_token_id": 199999, + "quantization_config": { + "_load_in_4bit": true, + "_load_in_8bit": false, + "bnb_4bit_compute_dtype": "bfloat16", + "bnb_4bit_quant_storage": "uint8", + 
"bnb_4bit_quant_type": "nf4", + "bnb_4bit_use_double_quant": true, + "llm_int8_enable_fp32_cpu_offload": false, + "llm_int8_has_fp16_weight": false, + "llm_int8_skip_modules": null, + "llm_int8_threshold": 6.0, + "load_in_4bit": true, + "load_in_8bit": false, + "quant_method": "bitsandbytes" + }, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "beta_fast": 32.0, + "beta_slow": 1.0, + "factor": 32.0, + "original_max_position_embeddings": 4096, + "rope_type": "yarn", + "truncate": false + }, + "rope_theta": 150000, + "router_aux_loss_coef": 0.9, + "sliding_window": 128, + "swiglu_limit": 7.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.56.0.dev0", + "use_cache": true, + "vocab_size": 201088 +} diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..78317cfc44866df4ad74d80917d2de9ec763d414 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,10 @@ +{ + "bos_token_id": 199998, + "do_sample": true, + "eos_token_id": [ + 200002, + 199999 + ], + "pad_token_id": 199999, + "transformers_version": "4.56.0.dev0" +} diff --git a/model-00002-of-00073.safetensors b/model-00002-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ffc0cdeabd7728fd42611ee0398fa4b6378779da --- /dev/null +++ b/model-00002-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66f8d893fc4fbd27a07d2454114d5cc84821fa18d7575b625aa184b31ef0a572 +size 4248207640 diff --git a/model-00003-of-00073.safetensors b/model-00003-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..09044e898f17de40644f9dffc9adeb821b79fbb2 --- /dev/null +++ b/model-00003-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dac486466d29442dea478d2c32e08e732574058ca0d9490e266b78eb0ea26a6 +size 2138581743 diff --git a/model-00004-of-00073.safetensors b/model-00004-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..83f6cf600c5f0c8b103e33df6643999950836fb8 --- /dev/null +++ b/model-00004-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dda6a71a050e68fe98ddfc12c0e3e86234b29d2ee5a6ba9083cabcc9e173563f +size 4248207640 diff --git a/model-00006-of-00073.safetensors b/model-00006-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4826abbdd222efc4dd73a42a68a3767db186b184 --- /dev/null +++ b/model-00006-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b81738668bce7b58ec8cef5e8c25b1e2b7afafcd1af7913da2788838263bd0 +size 4248207640 diff --git a/model-00007-of-00073.safetensors b/model-00007-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..67d32270db7513e0de3326b4a1599c6736f7ee1d --- /dev/null +++ b/model-00007-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb18bc8ef6173f2b20de8f6823ff9902420fcf1c7b984b9564a425cbf32788dd +size 2138581741 diff --git a/model-00008-of-00073.safetensors b/model-00008-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2420db70e716e40cceab702078683d15ae2a9c77 --- /dev/null +++ b/model-00008-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b43d83c403e92882c79cdfbeb8b80e30d0d7fe58329ab8d3a8a13db0c1d7c5d +size 4248207640 diff --git 
a/model-00009-of-00073.safetensors b/model-00009-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..61f09e739b2042cf71b63801731b7cb52c0610a2 --- /dev/null +++ b/model-00009-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e86efab9a378f573a65e720b436136d7210e3207ba65ff4bde911a2def608aa +size 2138581743 diff --git a/model-00010-of-00073.safetensors b/model-00010-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e60d1990f00f619edf4f6cd8d8c70146e442bea3 --- /dev/null +++ b/model-00010-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dc516fd1e058767eb5d566b81f2619fc9319aed76409628d94b9fda5e4994a0 +size 4248207640 diff --git a/model-00012-of-00073.safetensors b/model-00012-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..972054b5252c2a5dfab20c4eac58bd0309ac6975 --- /dev/null +++ b/model-00012-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39b2a45ac61f5b5d44e5a2df4b88643f8c7d57545b06c60121d954a9598599c1 +size 4248207640 diff --git a/model-00013-of-00073.safetensors b/model-00013-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f83cd9557afa376122424aa3848049520840e387 --- /dev/null +++ b/model-00013-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01f7b4e8220fb96dd1a529d7491876964cf2be03c4327e9934519d3aba4d308c +size 2138581743 diff --git a/model-00014-of-00073.safetensors b/model-00014-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b8fb665708d218c8c30bf3b87cbf7e0911c014c5 --- /dev/null +++ b/model-00014-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a4ba78d4b340e08bcdb3fbfb48c26cc57abd581046b6a2a6620b9c63cca14aa +size 4248207640 diff --git a/model-00017-of-00073.safetensors b/model-00017-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f3f7b7db28a1e43f1f18aade62ae4f09ff9ab5a1 --- /dev/null +++ b/model-00017-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d1ba4374a77cb60b31a55f2c418a6d1b00bd557efd4f1f4798c2472444412b +size 2138581741 diff --git a/model-00018-of-00073.safetensors b/model-00018-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..284f05e627e9326343342712706cf4d4af78b817 --- /dev/null +++ b/model-00018-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f52b5fe061cefc784b2fb5a09ce6f30fe1fa6641103a141a5b8722df2a48aff8 +size 4248207640 diff --git a/model-00024-of-00073.safetensors b/model-00024-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cf61bb785574a6ebca04b516a646503ebc931f8c --- /dev/null +++ b/model-00024-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f75eeb2761915a28fd12ae35d2fa13b6b984d9c6829184dc6a81bb778d1811c2 +size 4248207640 diff --git a/model-00025-of-00073.safetensors b/model-00025-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..182d01406a81087377e6dad544642f4b10af38b9 --- /dev/null +++ b/model-00025-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9537389cb195cb1ee2e0528893f3f519a14c35d27d434d10a21580ea83363fbf 
+size 2138581774 diff --git a/model-00026-of-00073.safetensors b/model-00026-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c0878898b2bbee01fece50654f41f0c4ce2ebba0 --- /dev/null +++ b/model-00026-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78d69f06e29de3eeb3e8df6353d418cfca5a6abb432f90b95c750252e5d3b7e1 +size 4248207640 diff --git a/model-00027-of-00073.safetensors b/model-00027-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c8a4b64a41789d142d016738b2526abfb6963f9b --- /dev/null +++ b/model-00027-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6da56d16866ad461201388a498303bfb7b4d957103d626d5fcfd98e552906f66 +size 2138581775 diff --git a/model-00028-of-00073.safetensors b/model-00028-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..15e444843fc91d96ca903cb006caf5d99846de73 --- /dev/null +++ b/model-00028-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2301da5402b74812725f99d892af657a421d7d3ff1fedf3309c28aa0fa0e4353 +size 4248207640 diff --git a/model-00029-of-00073.safetensors b/model-00029-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3c1875e161cdea117d810ad98940a9bdf5bf1ae6 --- /dev/null +++ b/model-00029-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1336384b3506f6bf09173b86f3a890bb3fcdb20193d932819a968a2fd265c3a4 +size 2138581776 diff --git a/model-00032-of-00073.safetensors b/model-00032-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3cc323a0b5cf83aeb072e6aeee414ddbb742a8e3 --- /dev/null +++ b/model-00032-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a9dc8df6da9507d6f99547ed7b6ab17793cbcd5a81c0c2f3439ff8f6c1d10c9 +size 4248207640 diff --git a/model-00034-of-00073.safetensors b/model-00034-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..088d89c3ce46312b5fb168b42c9b138975af0fdd --- /dev/null +++ b/model-00034-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2278f08494e6ef08a3e4b5794fc650970b01bb6652502c3bf861d9ee39024d52 +size 4248207640 diff --git a/model-00036-of-00073.safetensors b/model-00036-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a8e95409219a417441b9a780689e1c0c90bf0d3c --- /dev/null +++ b/model-00036-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d28428c507c8b4e042c656f2db6c30fc9db7dd6f6c0157dcda930c2a3ea4d11c +size 4248207640 diff --git a/model-00038-of-00073.safetensors b/model-00038-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..849397898f8f12af6c3f25eb4035ca339cbf60ca --- /dev/null +++ b/model-00038-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9704e3b5209e51b7310530e1da1e31b4aafb290a21b1957990b59ee98c08312f +size 4248207640 diff --git a/model-00040-of-00073.safetensors b/model-00040-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..87db2650c968dc085ff0badeaee1c644084e9842 --- /dev/null +++ b/model-00040-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:63132593ac1bd4883db66340d38ef61685065e53db5ac40276bbfc1d83fb8bb8 +size 4248207640 diff --git a/model-00041-of-00073.safetensors b/model-00041-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..87194a7338a9895e77bbb3fb87643a856f799ca5 --- /dev/null +++ b/model-00041-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b62d21c43f0bf7a17f868846bc1421907464a4782d8f85265bce5ca6a5e880c6 +size 2138581773 diff --git a/model-00042-of-00073.safetensors b/model-00042-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e077fbf39e8f59fb092cad7e9beb0f11a35d88a2 --- /dev/null +++ b/model-00042-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e39c7084ce12ee967b6dd15c8e1849fa550d50fd8e7d123064c3b3b5a233118a +size 4248207640 diff --git a/model-00045-of-00073.safetensors b/model-00045-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c199d66fd82970e9efbbefe69828ab6fa8ed38ac --- /dev/null +++ b/model-00045-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6ceee23f5a2e781ae1990d1c03d3417160f64aa07a893a7c29da3b48fa809e +size 2138581773 diff --git a/model-00046-of-00073.safetensors b/model-00046-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0f594f984ea46eff3d7723c171a02dd4f2378589 --- /dev/null +++ b/model-00046-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1939048181675b836a1f11862169988e54ecf788d4b74421f664052d565c07a +size 4248207640 diff --git a/model-00050-of-00073.safetensors b/model-00050-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ec92fb99018bb46f07247e1d0cb26ae87e64ccce --- /dev/null +++ b/model-00050-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65373fb48bed15e6db585aec180e53098ef5d59368265be9aa54da55f9ae0e05 +size 4248207640 diff --git a/model-00051-of-00073.safetensors b/model-00051-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9c447cb9e29859ea9d3bead0c7122239498a4fdf --- /dev/null +++ b/model-00051-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcb2a8a97ab2d3b1d49c9138fd428cd75ffa5c079744072874e94429af4f3203 +size 2138581772 diff --git a/model-00052-of-00073.safetensors b/model-00052-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..19291c6fd6b13eefbf7b721a2bf67b482efd69c6 --- /dev/null +++ b/model-00052-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa3fbc51fb6592d3a61b1769c06c2a922fbe396432626191e2b036de3e507d80 +size 4248207640 diff --git a/model-00054-of-00073.safetensors b/model-00054-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d93e6623a9cb4e3de6bf754dc568c3d43496a8c0 --- /dev/null +++ b/model-00054-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0531a970dd6c5d8e7cbae562255f6a3a9436546c311a29602e0fcc0fd8a84e3 +size 4248207640 diff --git a/model-00056-of-00073.safetensors b/model-00056-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1e7113d9e7d57947433e98a77c58c13231e34c68 --- /dev/null +++ b/model-00056-of-00073.safetensors @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b05fa63d01ec993405b2926e45798aea2b185200c2f008aeeaea68d0a6945c98 +size 4248207640 diff --git a/model-00057-of-00073.safetensors b/model-00057-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c4acc756a466c0ccdc8044b390ffd03d549ac928 --- /dev/null +++ b/model-00057-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eaf2af70f7c97f15d53dec3f58f4a8e41b949555ecefeb7b802bcadc587ba2a +size 2138581774 diff --git a/model-00058-of-00073.safetensors b/model-00058-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6bad9865380e13fc1ab55d7135f68a43edfa0b21 --- /dev/null +++ b/model-00058-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b8641d3b84630260b190480647bc640fd122dbe24a6dd465b6812a17ba73989 +size 4248207640 diff --git a/model-00060-of-00073.safetensors b/model-00060-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ff2ead735964c12ba27e21fd67e94edcc02baedc --- /dev/null +++ b/model-00060-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e2351f41533aec995d9780f0c6a4b47279ab381e02709b46fe618487503602a +size 4248207640 diff --git a/model-00061-of-00073.safetensors b/model-00061-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8ea49411931c2ac0ad79ea5d2ac822dd7aedea70 --- /dev/null +++ b/model-00061-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6b387735d686048c9c46bed56c60c86948a75f96234ff48d303fa760429de9d +size 2138581771 diff --git a/model-00062-of-00073.safetensors b/model-00062-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c2d3cf4ccf45a0fc5ac5dae3961e74f5c2493d3d --- /dev/null +++ b/model-00062-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d21973e03b03ac1b40821ecfe3e5c184ad04e6ef527e0b0a44a2f1879414f3ef +size 4248207640 diff --git a/model-00063-of-00073.safetensors b/model-00063-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6f746e296fc37f41c5aed2997706197270d70446 --- /dev/null +++ b/model-00063-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:360af365e01dbc56645d37210ffd5c4e92d53f6a909aea3d39d4c8d5b5a6bfb3 +size 2138581772 diff --git a/model-00065-of-00073.safetensors b/model-00065-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a8819ba59b75eef9cb946ea3a485e797d569da38 --- /dev/null +++ b/model-00065-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61ab20c7484c82a2f37b8e48867629beeb7a05c4a8175dab203801589a362b72 +size 2138581774 diff --git a/model-00067-of-00073.safetensors b/model-00067-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..723b27877b077fb23206b44af05ef835b56a8e74 --- /dev/null +++ b/model-00067-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25c10d3f1c499ddf6e77bc584c5c6c0969758df03e392bd3c80b616b35fd56b2 +size 2138581773 diff --git a/model-00069-of-00073.safetensors b/model-00069-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9e4968cd0e0615bf6f8e94c0666895e10a05fea5 --- /dev/null +++ 
b/model-00069-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12dfc9aa6ed931d6d0183cd5dcc4b78f67f2ad2dac336327806838d7f031642e +size 2138581774 diff --git a/model-00070-of-00073.safetensors b/model-00070-of-00073.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..12f4adbd05ea78a9774d4fb7818c600e720e62e5 --- /dev/null +++ b/model-00070-of-00073.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:964669a60753219bb8f516f22d2985b609743436f665afa55dc9c95c40f0dd06 +size 4248207640 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..08756f49f89f64027abb3c281874fa14d385e4b7 --- /dev/null +++ b/model.safetensors.index.json @@ -0,0 +1,1343 @@ +{ + "metadata": { + "total_parameters": 116829156672, + "total_size": 232240806057 + }, + "weight_map": { + "lm_head.weight": "model-00073-of-00073.safetensors", + "model.embed_tokens.weight": "model-00001-of-00073.safetensors", + "model.layers.0.input_layernorm.weight": "model-00003-of-00073.safetensors", + "model.layers.0.mlp.experts.down_proj": "model-00003-of-00073.safetensors", + "model.layers.0.mlp.experts.down_proj_bias": "model-00003-of-00073.safetensors", + "model.layers.0.mlp.experts.gate_up_proj": "model-00002-of-00073.safetensors", + "model.layers.0.mlp.experts.gate_up_proj_bias": "model-00002-of-00073.safetensors", + "model.layers.0.mlp.router.bias": "model-00001-of-00073.safetensors", + "model.layers.0.mlp.router.weight": "model-00001-of-00073.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00003-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight.absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight.quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.bias": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight.absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight.quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.weight.absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.q_proj.weight.quant_map": "model-00001-of-00073.safetensors", + 
"model.layers.0.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.sinks": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight.absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight.quant_map": "model-00001-of-00073.safetensors", + "model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00073.safetensors", + "model.layers.1.input_layernorm.weight": "model-00005-of-00073.safetensors", + "model.layers.1.mlp.experts.down_proj": "model-00005-of-00073.safetensors", + "model.layers.1.mlp.experts.down_proj_bias": "model-00005-of-00073.safetensors", + "model.layers.1.mlp.experts.gate_up_proj": "model-00004-of-00073.safetensors", + "model.layers.1.mlp.experts.gate_up_proj_bias": "model-00004-of-00073.safetensors", + "model.layers.1.mlp.router.bias": "model-00003-of-00073.safetensors", + "model.layers.1.mlp.router.weight": "model-00003-of-00073.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00005-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.bias": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight.absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight.quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.bias": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight.absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight.quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.bias": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight.absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight.quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.sinks": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.bias": "model-00003-of-00073.safetensors", + 
"model.layers.1.self_attn.v_proj.weight": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.weight.absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.weight.quant_map": "model-00003-of-00073.safetensors", + "model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00073.safetensors", + "model.layers.10.input_layernorm.weight": "model-00023-of-00073.safetensors", + "model.layers.10.mlp.experts.down_proj": "model-00023-of-00073.safetensors", + "model.layers.10.mlp.experts.down_proj_bias": "model-00023-of-00073.safetensors", + "model.layers.10.mlp.experts.gate_up_proj": "model-00022-of-00073.safetensors", + "model.layers.10.mlp.experts.gate_up_proj_bias": "model-00022-of-00073.safetensors", + "model.layers.10.mlp.router.bias": "model-00021-of-00073.safetensors", + "model.layers.10.mlp.router.weight": "model-00021-of-00073.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00023-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.bias": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight.absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight.nested_absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight.nested_quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight.quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.bias": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight.absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight.nested_absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight.nested_quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight.quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.bias": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight.absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight.nested_absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight.nested_quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight.quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.sinks": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.bias": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.weight.absmax": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.weight.nested_absmax": "model-00021-of-00073.safetensors", + 
"model.layers.10.self_attn.v_proj.weight.nested_quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.weight.quant_map": "model-00021-of-00073.safetensors", + "model.layers.10.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00021-of-00073.safetensors", + "model.layers.11.input_layernorm.weight": "model-00025-of-00073.safetensors", + "model.layers.11.mlp.experts.down_proj": "model-00025-of-00073.safetensors", + "model.layers.11.mlp.experts.down_proj_bias": "model-00025-of-00073.safetensors", + "model.layers.11.mlp.experts.gate_up_proj": "model-00024-of-00073.safetensors", + "model.layers.11.mlp.experts.gate_up_proj_bias": "model-00024-of-00073.safetensors", + "model.layers.11.mlp.router.bias": "model-00023-of-00073.safetensors", + "model.layers.11.mlp.router.weight": "model-00023-of-00073.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00025-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.bias": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight.absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight.nested_absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight.nested_quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight.quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.bias": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight.absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight.nested_absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight.nested_quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight.quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.bias": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight.absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight.nested_absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight.nested_quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight.quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.sinks": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.bias": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.weight.absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.weight.nested_absmax": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.weight.nested_quant_map": "model-00023-of-00073.safetensors", + "model.layers.11.self_attn.v_proj.weight.quant_map": "model-00023-of-00073.safetensors", + 
"model.layers.11.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00023-of-00073.safetensors", + "model.layers.12.input_layernorm.weight": "model-00027-of-00073.safetensors", + "model.layers.12.mlp.experts.down_proj": "model-00027-of-00073.safetensors", + "model.layers.12.mlp.experts.down_proj_bias": "model-00027-of-00073.safetensors", + "model.layers.12.mlp.experts.gate_up_proj": "model-00026-of-00073.safetensors", + "model.layers.12.mlp.experts.gate_up_proj_bias": "model-00026-of-00073.safetensors", + "model.layers.12.mlp.router.bias": "model-00025-of-00073.safetensors", + "model.layers.12.mlp.router.weight": "model-00025-of-00073.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00027-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.bias": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight.absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight.nested_absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight.nested_quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight.quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.bias": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight.absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight.nested_absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight.nested_quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight.quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.bias": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight.absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight.nested_absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight.nested_quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight.quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.sinks": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.bias": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight.absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight.nested_absmax": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight.nested_quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight.quant_map": "model-00025-of-00073.safetensors", + "model.layers.12.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00025-of-00073.safetensors", + "model.layers.13.input_layernorm.weight": "model-00029-of-00073.safetensors", + "model.layers.13.mlp.experts.down_proj": 
"model-00029-of-00073.safetensors", + "model.layers.13.mlp.experts.down_proj_bias": "model-00029-of-00073.safetensors", + "model.layers.13.mlp.experts.gate_up_proj": "model-00028-of-00073.safetensors", + "model.layers.13.mlp.experts.gate_up_proj_bias": "model-00028-of-00073.safetensors", + "model.layers.13.mlp.router.bias": "model-00027-of-00073.safetensors", + "model.layers.13.mlp.router.weight": "model-00027-of-00073.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00029-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.bias": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight.absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight.nested_absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight.nested_quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight.quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.bias": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight.absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight.nested_absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight.nested_quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight.quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.bias": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight.absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight.nested_absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight.nested_quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight.quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.sinks": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.bias": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight.absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight.nested_absmax": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight.nested_quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight.quant_map": "model-00027-of-00073.safetensors", + "model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00027-of-00073.safetensors", + "model.layers.14.input_layernorm.weight": "model-00031-of-00073.safetensors", + "model.layers.14.mlp.experts.down_proj": "model-00031-of-00073.safetensors", + "model.layers.14.mlp.experts.down_proj_bias": "model-00031-of-00073.safetensors", + "model.layers.14.mlp.experts.gate_up_proj": "model-00030-of-00073.safetensors", + "model.layers.14.mlp.experts.gate_up_proj_bias": 
"model-00030-of-00073.safetensors", + "model.layers.14.mlp.router.bias": "model-00029-of-00073.safetensors", + "model.layers.14.mlp.router.weight": "model-00029-of-00073.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00031-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.bias": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight.absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight.nested_absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight.nested_quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight.quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.bias": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight.absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight.nested_absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight.nested_quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight.quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.bias": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight.absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight.nested_absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight.nested_quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight.quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.sinks": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.bias": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight.absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight.nested_absmax": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight.quant_map": "model-00029-of-00073.safetensors", + "model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00029-of-00073.safetensors", + "model.layers.15.input_layernorm.weight": "model-00033-of-00073.safetensors", + "model.layers.15.mlp.experts.down_proj": "model-00033-of-00073.safetensors", + "model.layers.15.mlp.experts.down_proj_bias": "model-00033-of-00073.safetensors", + "model.layers.15.mlp.experts.gate_up_proj": "model-00032-of-00073.safetensors", + "model.layers.15.mlp.experts.gate_up_proj_bias": "model-00032-of-00073.safetensors", + "model.layers.15.mlp.router.bias": "model-00031-of-00073.safetensors", + "model.layers.15.mlp.router.weight": "model-00031-of-00073.safetensors", + "model.layers.15.post_attention_layernorm.weight": 
"model-00033-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.bias": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight.absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight.nested_absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight.nested_quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight.quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.bias": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight.absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight.nested_absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight.nested_quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight.quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.bias": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight.absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight.nested_absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight.nested_quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight.quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.sinks": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.bias": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight.absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight.nested_absmax": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight.nested_quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight.quant_map": "model-00031-of-00073.safetensors", + "model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00031-of-00073.safetensors", + "model.layers.16.input_layernorm.weight": "model-00035-of-00073.safetensors", + "model.layers.16.mlp.experts.down_proj": "model-00035-of-00073.safetensors", + "model.layers.16.mlp.experts.down_proj_bias": "model-00035-of-00073.safetensors", + "model.layers.16.mlp.experts.gate_up_proj": "model-00034-of-00073.safetensors", + "model.layers.16.mlp.experts.gate_up_proj_bias": "model-00034-of-00073.safetensors", + "model.layers.16.mlp.router.bias": "model-00033-of-00073.safetensors", + "model.layers.16.mlp.router.weight": "model-00033-of-00073.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00035-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.bias": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight.absmax": 
"model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight.nested_absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight.nested_quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight.quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.bias": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight.absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight.nested_absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight.nested_quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight.quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.bias": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight.absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight.nested_absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight.nested_quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight.quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.sinks": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.bias": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight.absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight.nested_absmax": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight.nested_quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight.quant_map": "model-00033-of-00073.safetensors", + "model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00033-of-00073.safetensors", + "model.layers.17.input_layernorm.weight": "model-00037-of-00073.safetensors", + "model.layers.17.mlp.experts.down_proj": "model-00037-of-00073.safetensors", + "model.layers.17.mlp.experts.down_proj_bias": "model-00037-of-00073.safetensors", + "model.layers.17.mlp.experts.gate_up_proj": "model-00036-of-00073.safetensors", + "model.layers.17.mlp.experts.gate_up_proj_bias": "model-00036-of-00073.safetensors", + "model.layers.17.mlp.router.bias": "model-00035-of-00073.safetensors", + "model.layers.17.mlp.router.weight": "model-00035-of-00073.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00037-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.bias": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.weight.absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.weight.nested_absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.weight.nested_quant_map": "model-00035-of-00073.safetensors", + 
"model.layers.17.self_attn.k_proj.weight.quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.bias": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight.absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight.nested_absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight.nested_quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight.quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.bias": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight.absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight.nested_absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight.nested_quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight.quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.sinks": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.bias": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight.absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight.nested_absmax": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight.nested_quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight.quant_map": "model-00035-of-00073.safetensors", + "model.layers.17.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00035-of-00073.safetensors", + "model.layers.18.input_layernorm.weight": "model-00039-of-00073.safetensors", + "model.layers.18.mlp.experts.down_proj": "model-00039-of-00073.safetensors", + "model.layers.18.mlp.experts.down_proj_bias": "model-00039-of-00073.safetensors", + "model.layers.18.mlp.experts.gate_up_proj": "model-00038-of-00073.safetensors", + "model.layers.18.mlp.experts.gate_up_proj_bias": "model-00038-of-00073.safetensors", + "model.layers.18.mlp.router.bias": "model-00037-of-00073.safetensors", + "model.layers.18.mlp.router.weight": "model-00037-of-00073.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00039-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.bias": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight.absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight.nested_absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight.nested_quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight.quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.bias": 
"model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight.absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight.nested_absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight.nested_quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight.quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.bias": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight.absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight.nested_absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight.nested_quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight.quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.sinks": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.bias": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight.absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight.nested_absmax": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight.nested_quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight.quant_map": "model-00037-of-00073.safetensors", + "model.layers.18.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00037-of-00073.safetensors", + "model.layers.19.input_layernorm.weight": "model-00041-of-00073.safetensors", + "model.layers.19.mlp.experts.down_proj": "model-00041-of-00073.safetensors", + "model.layers.19.mlp.experts.down_proj_bias": "model-00041-of-00073.safetensors", + "model.layers.19.mlp.experts.gate_up_proj": "model-00040-of-00073.safetensors", + "model.layers.19.mlp.experts.gate_up_proj_bias": "model-00040-of-00073.safetensors", + "model.layers.19.mlp.router.bias": "model-00039-of-00073.safetensors", + "model.layers.19.mlp.router.weight": "model-00039-of-00073.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00041-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.bias": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight.absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight.nested_absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight.nested_quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight.quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.bias": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.weight.absmax": "model-00039-of-00073.safetensors", + 
"model.layers.19.self_attn.o_proj.weight.nested_absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.weight.nested_quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.weight.quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.bias": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight.absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight.nested_absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight.nested_quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight.quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.sinks": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.bias": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight.absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight.nested_absmax": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight.nested_quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight.quant_map": "model-00039-of-00073.safetensors", + "model.layers.19.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00039-of-00073.safetensors", + "model.layers.2.input_layernorm.weight": "model-00007-of-00073.safetensors", + "model.layers.2.mlp.experts.down_proj": "model-00007-of-00073.safetensors", + "model.layers.2.mlp.experts.down_proj_bias": "model-00007-of-00073.safetensors", + "model.layers.2.mlp.experts.gate_up_proj": "model-00006-of-00073.safetensors", + "model.layers.2.mlp.experts.gate_up_proj_bias": "model-00006-of-00073.safetensors", + "model.layers.2.mlp.router.bias": "model-00005-of-00073.safetensors", + "model.layers.2.mlp.router.weight": "model-00005-of-00073.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00007-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.bias": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight.absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight.nested_absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight.nested_quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight.quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.bias": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.weight.absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.weight.nested_absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.weight.nested_quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.o_proj.weight.quant_map": "model-00005-of-00073.safetensors", 
+ "model.layers.2.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.bias": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight.absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight.nested_absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight.nested_quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight.quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.sinks": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.bias": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight.absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight.nested_absmax": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight.quant_map": "model-00005-of-00073.safetensors", + "model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00073.safetensors", + "model.layers.20.input_layernorm.weight": "model-00043-of-00073.safetensors", + "model.layers.20.mlp.experts.down_proj": "model-00043-of-00073.safetensors", + "model.layers.20.mlp.experts.down_proj_bias": "model-00043-of-00073.safetensors", + "model.layers.20.mlp.experts.gate_up_proj": "model-00042-of-00073.safetensors", + "model.layers.20.mlp.experts.gate_up_proj_bias": "model-00042-of-00073.safetensors", + "model.layers.20.mlp.router.bias": "model-00041-of-00073.safetensors", + "model.layers.20.mlp.router.weight": "model-00041-of-00073.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00043-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.bias": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight.absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight.nested_absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight.nested_quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight.quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.bias": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight.absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight.nested_absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight.nested_quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight.quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.bias": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00041-of-00073.safetensors", + 
"model.layers.20.self_attn.q_proj.weight.absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.weight.nested_absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.weight.nested_quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.weight.quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.sinks": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.bias": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight.absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight.nested_absmax": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight.nested_quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight.quant_map": "model-00041-of-00073.safetensors", + "model.layers.20.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00041-of-00073.safetensors", + "model.layers.21.input_layernorm.weight": "model-00045-of-00073.safetensors", + "model.layers.21.mlp.experts.down_proj": "model-00045-of-00073.safetensors", + "model.layers.21.mlp.experts.down_proj_bias": "model-00045-of-00073.safetensors", + "model.layers.21.mlp.experts.gate_up_proj": "model-00044-of-00073.safetensors", + "model.layers.21.mlp.experts.gate_up_proj_bias": "model-00044-of-00073.safetensors", + "model.layers.21.mlp.router.bias": "model-00043-of-00073.safetensors", + "model.layers.21.mlp.router.weight": "model-00043-of-00073.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00045-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.bias": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight.absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight.nested_absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight.nested_quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight.quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.bias": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight.absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight.nested_absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight.nested_quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight.quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.bias": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight.absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight.nested_absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight.nested_quant_map": 
"model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight.quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.sinks": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.bias": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight.absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight.nested_absmax": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight.nested_quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight.quant_map": "model-00043-of-00073.safetensors", + "model.layers.21.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00043-of-00073.safetensors", + "model.layers.22.input_layernorm.weight": "model-00047-of-00073.safetensors", + "model.layers.22.mlp.experts.down_proj": "model-00047-of-00073.safetensors", + "model.layers.22.mlp.experts.down_proj_bias": "model-00047-of-00073.safetensors", + "model.layers.22.mlp.experts.gate_up_proj": "model-00046-of-00073.safetensors", + "model.layers.22.mlp.experts.gate_up_proj_bias": "model-00046-of-00073.safetensors", + "model.layers.22.mlp.router.bias": "model-00045-of-00073.safetensors", + "model.layers.22.mlp.router.weight": "model-00045-of-00073.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00047-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.bias": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight.absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight.nested_absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight.nested_quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight.quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.bias": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight.absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight.nested_absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight.nested_quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight.quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.bias": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight.absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight.nested_absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight.nested_quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight.quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00045-of-00073.safetensors", + 
"model.layers.22.self_attn.sinks": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.bias": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight.absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight.nested_absmax": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight.quant_map": "model-00045-of-00073.safetensors", + "model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00045-of-00073.safetensors", + "model.layers.23.input_layernorm.weight": "model-00049-of-00073.safetensors", + "model.layers.23.mlp.experts.down_proj": "model-00049-of-00073.safetensors", + "model.layers.23.mlp.experts.down_proj_bias": "model-00049-of-00073.safetensors", + "model.layers.23.mlp.experts.gate_up_proj": "model-00048-of-00073.safetensors", + "model.layers.23.mlp.experts.gate_up_proj_bias": "model-00048-of-00073.safetensors", + "model.layers.23.mlp.router.bias": "model-00047-of-00073.safetensors", + "model.layers.23.mlp.router.weight": "model-00047-of-00073.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00049-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.bias": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight.absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight.nested_absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight.nested_quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight.quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.bias": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight.absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight.nested_absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight.nested_quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight.quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.bias": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight.absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight.nested_absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight.nested_quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight.quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.sinks": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.bias": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00047-of-00073.safetensors", + 
"model.layers.23.self_attn.v_proj.weight.absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.weight.nested_absmax": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.weight.nested_quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.weight.quant_map": "model-00047-of-00073.safetensors", + "model.layers.23.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00047-of-00073.safetensors", + "model.layers.24.input_layernorm.weight": "model-00051-of-00073.safetensors", + "model.layers.24.mlp.experts.down_proj": "model-00051-of-00073.safetensors", + "model.layers.24.mlp.experts.down_proj_bias": "model-00051-of-00073.safetensors", + "model.layers.24.mlp.experts.gate_up_proj": "model-00050-of-00073.safetensors", + "model.layers.24.mlp.experts.gate_up_proj_bias": "model-00050-of-00073.safetensors", + "model.layers.24.mlp.router.bias": "model-00049-of-00073.safetensors", + "model.layers.24.mlp.router.weight": "model-00049-of-00073.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00051-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.bias": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight.absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight.quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.bias": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight.absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight.quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.bias": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight.absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight.quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.sinks": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.bias": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight.absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight.nested_quant_map": 
"model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight.quant_map": "model-00049-of-00073.safetensors", + "model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00049-of-00073.safetensors", + "model.layers.25.input_layernorm.weight": "model-00053-of-00073.safetensors", + "model.layers.25.mlp.experts.down_proj": "model-00053-of-00073.safetensors", + "model.layers.25.mlp.experts.down_proj_bias": "model-00053-of-00073.safetensors", + "model.layers.25.mlp.experts.gate_up_proj": "model-00052-of-00073.safetensors", + "model.layers.25.mlp.experts.gate_up_proj_bias": "model-00052-of-00073.safetensors", + "model.layers.25.mlp.router.bias": "model-00051-of-00073.safetensors", + "model.layers.25.mlp.router.weight": "model-00051-of-00073.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00053-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.bias": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight.absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight.nested_absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight.nested_quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight.quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.bias": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight.absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight.nested_absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight.nested_quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight.quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.bias": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight.absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight.nested_absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight.nested_quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight.quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.sinks": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.bias": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight.absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight.nested_absmax": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight.quant_map": "model-00051-of-00073.safetensors", + "model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00051-of-00073.safetensors", + 
"model.layers.26.input_layernorm.weight": "model-00055-of-00073.safetensors", + "model.layers.26.mlp.experts.down_proj": "model-00055-of-00073.safetensors", + "model.layers.26.mlp.experts.down_proj_bias": "model-00055-of-00073.safetensors", + "model.layers.26.mlp.experts.gate_up_proj": "model-00054-of-00073.safetensors", + "model.layers.26.mlp.experts.gate_up_proj_bias": "model-00054-of-00073.safetensors", + "model.layers.26.mlp.router.bias": "model-00053-of-00073.safetensors", + "model.layers.26.mlp.router.weight": "model-00053-of-00073.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00055-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.bias": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight.absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight.quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.bias": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight.absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight.quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.bias": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight.absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight.quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.sinks": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.bias": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight.absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight.quant_map": "model-00053-of-00073.safetensors", + "model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00053-of-00073.safetensors", + "model.layers.27.input_layernorm.weight": "model-00057-of-00073.safetensors", + "model.layers.27.mlp.experts.down_proj": "model-00057-of-00073.safetensors", + "model.layers.27.mlp.experts.down_proj_bias": "model-00057-of-00073.safetensors", + 
"model.layers.27.mlp.experts.gate_up_proj": "model-00056-of-00073.safetensors", + "model.layers.27.mlp.experts.gate_up_proj_bias": "model-00056-of-00073.safetensors", + "model.layers.27.mlp.router.bias": "model-00055-of-00073.safetensors", + "model.layers.27.mlp.router.weight": "model-00055-of-00073.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00057-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.bias": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight.absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight.quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.bias": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight.absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight.quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.bias": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight.absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight.quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.sinks": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.bias": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight.absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight.quant_map": "model-00055-of-00073.safetensors", + "model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00055-of-00073.safetensors", + "model.layers.28.input_layernorm.weight": "model-00059-of-00073.safetensors", + "model.layers.28.mlp.experts.down_proj": "model-00059-of-00073.safetensors", + "model.layers.28.mlp.experts.down_proj_bias": "model-00059-of-00073.safetensors", + "model.layers.28.mlp.experts.gate_up_proj": "model-00058-of-00073.safetensors", + "model.layers.28.mlp.experts.gate_up_proj_bias": "model-00058-of-00073.safetensors", + "model.layers.28.mlp.router.bias": "model-00057-of-00073.safetensors", + 
"model.layers.28.mlp.router.weight": "model-00057-of-00073.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00059-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.bias": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight.absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight.nested_absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight.nested_quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight.quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.bias": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight.absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight.nested_absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight.nested_quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight.quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.bias": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight.absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight.nested_absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight.nested_quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight.quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.sinks": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.bias": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight.absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight.nested_absmax": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight.nested_quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight.quant_map": "model-00057-of-00073.safetensors", + "model.layers.28.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00057-of-00073.safetensors", + "model.layers.29.input_layernorm.weight": "model-00061-of-00073.safetensors", + "model.layers.29.mlp.experts.down_proj": "model-00061-of-00073.safetensors", + "model.layers.29.mlp.experts.down_proj_bias": "model-00061-of-00073.safetensors", + "model.layers.29.mlp.experts.gate_up_proj": "model-00060-of-00073.safetensors", + "model.layers.29.mlp.experts.gate_up_proj_bias": "model-00060-of-00073.safetensors", + "model.layers.29.mlp.router.bias": "model-00059-of-00073.safetensors", + "model.layers.29.mlp.router.weight": "model-00059-of-00073.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00061-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.bias": "model-00059-of-00073.safetensors", + 
"model.layers.29.self_attn.k_proj.weight": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.weight.absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.weight.nested_absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.weight.nested_quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.weight.quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.bias": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight.absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight.nested_absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight.nested_quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight.quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.bias": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight.absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight.nested_absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight.nested_quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight.quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.sinks": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.bias": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight.absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight.nested_absmax": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight.nested_quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight.quant_map": "model-00059-of-00073.safetensors", + "model.layers.29.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00059-of-00073.safetensors", + "model.layers.3.input_layernorm.weight": "model-00009-of-00073.safetensors", + "model.layers.3.mlp.experts.down_proj": "model-00009-of-00073.safetensors", + "model.layers.3.mlp.experts.down_proj_bias": "model-00009-of-00073.safetensors", + "model.layers.3.mlp.experts.gate_up_proj": "model-00008-of-00073.safetensors", + "model.layers.3.mlp.experts.gate_up_proj_bias": "model-00008-of-00073.safetensors", + "model.layers.3.mlp.router.bias": "model-00007-of-00073.safetensors", + "model.layers.3.mlp.router.weight": "model-00007-of-00073.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00009-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.bias": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.weight.absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.weight.nested_absmax": "model-00007-of-00073.safetensors", + 
"model.layers.3.self_attn.k_proj.weight.nested_quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.weight.quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.bias": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight.absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight.nested_absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight.nested_quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight.quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.bias": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight.absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight.nested_absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight.nested_quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight.quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.sinks": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.bias": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight.absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight.nested_absmax": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight.nested_quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight.quant_map": "model-00007-of-00073.safetensors", + "model.layers.3.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00007-of-00073.safetensors", + "model.layers.30.input_layernorm.weight": "model-00063-of-00073.safetensors", + "model.layers.30.mlp.experts.down_proj": "model-00063-of-00073.safetensors", + "model.layers.30.mlp.experts.down_proj_bias": "model-00063-of-00073.safetensors", + "model.layers.30.mlp.experts.gate_up_proj": "model-00062-of-00073.safetensors", + "model.layers.30.mlp.experts.gate_up_proj_bias": "model-00062-of-00073.safetensors", + "model.layers.30.mlp.router.bias": "model-00061-of-00073.safetensors", + "model.layers.30.mlp.router.weight": "model-00061-of-00073.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00063-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.bias": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight.absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight.nested_absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight.nested_quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight.quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": 
"model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.bias": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight.absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight.nested_absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight.nested_quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight.quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.bias": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight.absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight.nested_absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight.nested_quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight.quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.sinks": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.bias": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight.absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight.nested_absmax": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight.nested_quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight.quant_map": "model-00061-of-00073.safetensors", + "model.layers.30.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00061-of-00073.safetensors", + "model.layers.31.input_layernorm.weight": "model-00065-of-00073.safetensors", + "model.layers.31.mlp.experts.down_proj": "model-00065-of-00073.safetensors", + "model.layers.31.mlp.experts.down_proj_bias": "model-00065-of-00073.safetensors", + "model.layers.31.mlp.experts.gate_up_proj": "model-00064-of-00073.safetensors", + "model.layers.31.mlp.experts.gate_up_proj_bias": "model-00064-of-00073.safetensors", + "model.layers.31.mlp.router.bias": "model-00063-of-00073.safetensors", + "model.layers.31.mlp.router.weight": "model-00063-of-00073.safetensors", + "model.layers.31.post_attention_layernorm.weight": "model-00065-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.bias": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight.absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight.nested_absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight.nested_quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight.quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.bias": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight.absmax": 
"model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight.nested_absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight.nested_quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight.quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.bias": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight.absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight.nested_absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight.nested_quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight.quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.sinks": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.bias": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight.absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight.nested_absmax": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight.nested_quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight.quant_map": "model-00063-of-00073.safetensors", + "model.layers.31.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00063-of-00073.safetensors", + "model.layers.32.input_layernorm.weight": "model-00067-of-00073.safetensors", + "model.layers.32.mlp.experts.down_proj": "model-00067-of-00073.safetensors", + "model.layers.32.mlp.experts.down_proj_bias": "model-00067-of-00073.safetensors", + "model.layers.32.mlp.experts.gate_up_proj": "model-00066-of-00073.safetensors", + "model.layers.32.mlp.experts.gate_up_proj_bias": "model-00066-of-00073.safetensors", + "model.layers.32.mlp.router.bias": "model-00065-of-00073.safetensors", + "model.layers.32.mlp.router.weight": "model-00065-of-00073.safetensors", + "model.layers.32.post_attention_layernorm.weight": "model-00067-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.bias": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight.absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight.nested_absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight.nested_quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight.quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.bias": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.weight": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.weight.absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.weight.nested_absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.weight.nested_quant_map": "model-00065-of-00073.safetensors", + 
"model.layers.32.self_attn.o_proj.weight.quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.bias": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight.absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight.nested_absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight.nested_quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight.quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.sinks": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.bias": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight.absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight.nested_absmax": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight.nested_quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight.quant_map": "model-00065-of-00073.safetensors", + "model.layers.32.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00065-of-00073.safetensors", + "model.layers.33.input_layernorm.weight": "model-00069-of-00073.safetensors", + "model.layers.33.mlp.experts.down_proj": "model-00069-of-00073.safetensors", + "model.layers.33.mlp.experts.down_proj_bias": "model-00069-of-00073.safetensors", + "model.layers.33.mlp.experts.gate_up_proj": "model-00068-of-00073.safetensors", + "model.layers.33.mlp.experts.gate_up_proj_bias": "model-00068-of-00073.safetensors", + "model.layers.33.mlp.router.bias": "model-00067-of-00073.safetensors", + "model.layers.33.mlp.router.weight": "model-00067-of-00073.safetensors", + "model.layers.33.post_attention_layernorm.weight": "model-00069-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.bias": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight.absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight.nested_absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight.nested_quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight.quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.bias": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight.absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight.nested_absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight.nested_quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight.quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.bias": 
"model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight.absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight.nested_absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight.nested_quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight.quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.sinks": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.bias": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight.absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight.nested_absmax": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight.nested_quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight.quant_map": "model-00067-of-00073.safetensors", + "model.layers.33.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00067-of-00073.safetensors", + "model.layers.34.input_layernorm.weight": "model-00071-of-00073.safetensors", + "model.layers.34.mlp.experts.down_proj": "model-00071-of-00073.safetensors", + "model.layers.34.mlp.experts.down_proj_bias": "model-00071-of-00073.safetensors", + "model.layers.34.mlp.experts.gate_up_proj": "model-00070-of-00073.safetensors", + "model.layers.34.mlp.experts.gate_up_proj_bias": "model-00070-of-00073.safetensors", + "model.layers.34.mlp.router.bias": "model-00069-of-00073.safetensors", + "model.layers.34.mlp.router.weight": "model-00069-of-00073.safetensors", + "model.layers.34.post_attention_layernorm.weight": "model-00071-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.bias": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight.absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight.nested_absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight.nested_quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight.quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.bias": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight.absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight.nested_absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight.nested_quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight.quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.bias": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.weight": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.weight.absmax": "model-00069-of-00073.safetensors", + 
"model.layers.34.self_attn.q_proj.weight.nested_absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.weight.nested_quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.weight.quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.sinks": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.bias": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight.absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight.nested_absmax": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight.nested_quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight.quant_map": "model-00069-of-00073.safetensors", + "model.layers.34.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00069-of-00073.safetensors", + "model.layers.35.input_layernorm.weight": "model-00073-of-00073.safetensors", + "model.layers.35.mlp.experts.down_proj": "model-00073-of-00073.safetensors", + "model.layers.35.mlp.experts.down_proj_bias": "model-00073-of-00073.safetensors", + "model.layers.35.mlp.experts.gate_up_proj": "model-00072-of-00073.safetensors", + "model.layers.35.mlp.experts.gate_up_proj_bias": "model-00072-of-00073.safetensors", + "model.layers.35.mlp.router.bias": "model-00071-of-00073.safetensors", + "model.layers.35.mlp.router.weight": "model-00071-of-00073.safetensors", + "model.layers.35.post_attention_layernorm.weight": "model-00073-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.bias": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight.absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight.nested_absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight.nested_quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight.quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.bias": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight.absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight.nested_absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight.nested_quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight.quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.bias": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight.absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight.nested_absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight.nested_quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight.quant_map": 
"model-00071-of-00073.safetensors", + "model.layers.35.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.sinks": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.bias": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight.absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight.nested_absmax": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight.nested_quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight.quant_map": "model-00071-of-00073.safetensors", + "model.layers.35.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00071-of-00073.safetensors", + "model.layers.4.input_layernorm.weight": "model-00011-of-00073.safetensors", + "model.layers.4.mlp.experts.down_proj": "model-00011-of-00073.safetensors", + "model.layers.4.mlp.experts.down_proj_bias": "model-00011-of-00073.safetensors", + "model.layers.4.mlp.experts.gate_up_proj": "model-00010-of-00073.safetensors", + "model.layers.4.mlp.experts.gate_up_proj_bias": "model-00010-of-00073.safetensors", + "model.layers.4.mlp.router.bias": "model-00009-of-00073.safetensors", + "model.layers.4.mlp.router.weight": "model-00009-of-00073.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00011-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.bias": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight.absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight.nested_absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight.nested_quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight.quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.bias": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight.absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight.nested_absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight.nested_quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight.quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.bias": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight.absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight.nested_absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight.nested_quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight.quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.sinks": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.bias": 
"model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight.absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight.nested_absmax": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight.nested_quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight.quant_map": "model-00009-of-00073.safetensors", + "model.layers.4.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00009-of-00073.safetensors", + "model.layers.5.input_layernorm.weight": "model-00013-of-00073.safetensors", + "model.layers.5.mlp.experts.down_proj": "model-00013-of-00073.safetensors", + "model.layers.5.mlp.experts.down_proj_bias": "model-00013-of-00073.safetensors", + "model.layers.5.mlp.experts.gate_up_proj": "model-00012-of-00073.safetensors", + "model.layers.5.mlp.experts.gate_up_proj_bias": "model-00012-of-00073.safetensors", + "model.layers.5.mlp.router.bias": "model-00011-of-00073.safetensors", + "model.layers.5.mlp.router.weight": "model-00011-of-00073.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00013-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.bias": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight.absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight.nested_absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight.nested_quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight.quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.bias": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight.absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight.nested_absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight.nested_quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight.quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.bias": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight.absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight.nested_absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight.nested_quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight.quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.sinks": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.bias": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.weight.absmax": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.weight.nested_absmax": "model-00011-of-00073.safetensors", 
+ "model.layers.5.self_attn.v_proj.weight.nested_quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.weight.quant_map": "model-00011-of-00073.safetensors", + "model.layers.5.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00011-of-00073.safetensors", + "model.layers.6.input_layernorm.weight": "model-00015-of-00073.safetensors", + "model.layers.6.mlp.experts.down_proj": "model-00015-of-00073.safetensors", + "model.layers.6.mlp.experts.down_proj_bias": "model-00015-of-00073.safetensors", + "model.layers.6.mlp.experts.gate_up_proj": "model-00014-of-00073.safetensors", + "model.layers.6.mlp.experts.gate_up_proj_bias": "model-00014-of-00073.safetensors", + "model.layers.6.mlp.router.bias": "model-00013-of-00073.safetensors", + "model.layers.6.mlp.router.weight": "model-00013-of-00073.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00015-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.bias": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight.absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight.nested_absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight.nested_quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight.quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.bias": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight.absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight.nested_absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight.nested_quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight.quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.bias": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight.absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight.nested_absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight.nested_quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight.quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.sinks": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.bias": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight.absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight.nested_absmax": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight.nested_quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight.quant_map": "model-00013-of-00073.safetensors", + "model.layers.6.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00013-of-00073.safetensors", 
+ "model.layers.7.input_layernorm.weight": "model-00017-of-00073.safetensors", + "model.layers.7.mlp.experts.down_proj": "model-00017-of-00073.safetensors", + "model.layers.7.mlp.experts.down_proj_bias": "model-00017-of-00073.safetensors", + "model.layers.7.mlp.experts.gate_up_proj": "model-00016-of-00073.safetensors", + "model.layers.7.mlp.experts.gate_up_proj_bias": "model-00016-of-00073.safetensors", + "model.layers.7.mlp.router.bias": "model-00015-of-00073.safetensors", + "model.layers.7.mlp.router.weight": "model-00015-of-00073.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00017-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.bias": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight.absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight.nested_absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight.nested_quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight.quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.bias": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight.absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight.nested_absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight.nested_quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight.quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.bias": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight.absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight.nested_absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight.nested_quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight.quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.sinks": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.bias": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight.absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight.nested_absmax": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight.nested_quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight.quant_map": "model-00015-of-00073.safetensors", + "model.layers.7.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00015-of-00073.safetensors", + "model.layers.8.input_layernorm.weight": "model-00019-of-00073.safetensors", + "model.layers.8.mlp.experts.down_proj": "model-00019-of-00073.safetensors", + "model.layers.8.mlp.experts.down_proj_bias": "model-00019-of-00073.safetensors", + "model.layers.8.mlp.experts.gate_up_proj": 
"model-00018-of-00073.safetensors", + "model.layers.8.mlp.experts.gate_up_proj_bias": "model-00018-of-00073.safetensors", + "model.layers.8.mlp.router.bias": "model-00017-of-00073.safetensors", + "model.layers.8.mlp.router.weight": "model-00017-of-00073.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00019-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.bias": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight.absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight.nested_absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight.nested_quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight.quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.bias": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight.absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight.nested_absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight.nested_quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight.quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.bias": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight.absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight.nested_absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight.nested_quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight.quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.sinks": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.bias": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight.absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight.nested_absmax": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight.nested_quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight.quant_map": "model-00017-of-00073.safetensors", + "model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00017-of-00073.safetensors", + "model.layers.9.input_layernorm.weight": "model-00021-of-00073.safetensors", + "model.layers.9.mlp.experts.down_proj": "model-00021-of-00073.safetensors", + "model.layers.9.mlp.experts.down_proj_bias": "model-00021-of-00073.safetensors", + "model.layers.9.mlp.experts.gate_up_proj": "model-00020-of-00073.safetensors", + "model.layers.9.mlp.experts.gate_up_proj_bias": "model-00020-of-00073.safetensors", + "model.layers.9.mlp.router.bias": "model-00019-of-00073.safetensors", + "model.layers.9.mlp.router.weight": "model-00019-of-00073.safetensors", + 
"model.layers.9.post_attention_layernorm.weight": "model-00021-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.bias": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight.absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight.nested_absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight.nested_quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight.quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.bias": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight.absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight.nested_absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight.nested_quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight.quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.bias": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight.absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight.nested_absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight.nested_quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight.quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.sinks": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.bias": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight.absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight.nested_absmax": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight.nested_quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight.quant_map": "model-00019-of-00073.safetensors", + "model.layers.9.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00019-of-00073.safetensors", + "model.norm.weight": "model-00073-of-00073.safetensors" + } +} diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..6274cc1bd159aa75de771315558e5cac7dd8bea0 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,23 @@ +{ + "bos_token": { + "content": "<|startoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|return|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 
0000000000000000000000000000000000000000..6ec3ef1795cbbda6b7cb7d1f114919cbe3fdd647 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0614fe83cadab421296e664e1f48f4261fa8fef6e03e63bb75c20f38e37d07d3 +size 27868174 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..c021cddb0a9dd35b1bf83a9f145be2d9b3757891 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,183 @@ +{ + "added_tokens_decoder": { + "199998": { + "content": "<|startoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "199999": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200000": { + "content": "<|reserved_200000|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200001": { + "content": "<|reserved_200001|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200002": { + "content": "<|return|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200003": { + "content": "<|constrain|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200004": { + "content": "<|reserved_200004|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200005": { + "content": "<|channel|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200006": { + "content": "<|start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200007": { + "content": "<|end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200008": { + "content": "<|message|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200009": { + "content": "<|reserved_200009|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200010": { + "content": "<|reserved_200010|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200011": { + "content": "<|reserved_200011|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200012": { + "content": "<|call|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200013": { + "content": "<|reserved_200013|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200014": { + "content": "<|reserved_200014|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200015": { + "content": "<|reserved_200015|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200016": { + "content": "<|reserved_200016|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "200017": { + "content": "<|reserved_200017|>", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "200018": { + "content": "<|endofprompt|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "bos_token": "<|startoftext|>", + "clean_up_tokenization_spaces": false, + "eos_token": "<|return|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 1000000000000000019884624838656, + "pad_token": "<|endoftext|>", + "tokenizer_class": "PreTrainedTokenizerFast" +}