Upload folder using huggingface_hub
- .gitattributes +2 -0
- .ipynb_checkpoints/README-checkpoint.md +682 -0
- .mdl +0 -0
- .msc +0 -0
- .mv +1 -0
- README.md +682 -0
- chat_template.jinja +171 -0
- config.json +41 -0
- configuration.json +1 -0
- generation_config.json +10 -0
- model-00001-of-00008.safetensors +3 -0
- model-00002-of-00008.safetensors +3 -0
- model-00003-of-00008.safetensors +3 -0
- model-00004-of-00008.safetensors +3 -0
- model-00005-of-00008.safetensors +3 -0
- model-00006-of-00008.safetensors +3 -0
- model-00007-of-00008.safetensors +3 -0
- model-00008-of-00008.safetensors +3 -0
- model.safetensors.index.json +0 -0
- special_tokens_map.json +23 -0
- thinking_budget.png +3 -0
- tokenizer.json +3 -0
- tokenizer_config.json +1035 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+thinking_budget.png filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/README-checkpoint.md
ADDED
@@ -0,0 +1,682 @@
---
license: apache-2.0
library_name: transformers
pipeline_tag: text-generation
tags:
- vLLM
- AWQ
language:
- zh
- en
base_model:
- ByteDance-Seed/Seed-OSS-36B-Instruct
base_model_relation: quantized

---
# Seed-OSS-36B-Instruct-AWQ
Base model: [ByteDance-Seed/Seed-OSS-36B-Instruct](https://huggingface.co/ByteDance-Seed/Seed-OSS-36B-Instruct)

### 【vLLM Single Node with 2 GPUs — Startup Command】
```
CONTEXT_LENGTH=32768

vllm serve \
    QuantTrio/Seed-OSS-36B-Instruct-AWQ \
    --served-model-name Seed-OSS-36B-Instruct-AWQ \
    --enable-auto-tool-choice \
    --tool-call-parser seed_oss \
    --chat-template ./Seed-OSS-36B-Instruct-AWQ/chat_template.jinja \
    --swap-space 4 \
    --max-num-seqs 512 \
    --max-model-len $CONTEXT_LENGTH \
    --max-seq-len-to-capture $CONTEXT_LENGTH \
    --gpu-memory-utilization 0.9 \
    --tensor-parallel-size 2 \
    --trust-remote-code \
    --disable-log-requests \
    --host 0.0.0.0 \
    --port 8000
```
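
Once the server above is running, it exposes an OpenAI-compatible API on port 8000. A minimal client sketch, assuming the `openai` Python package is installed and using an illustrative prompt (the sampling values follow the recommendation further down this card):

```python
from openai import OpenAI

# Point the standard OpenAI client at the local vLLM server started above.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="Seed-OSS-36B-Instruct-AWQ",  # must match --served-model-name
    messages=[{"role": "user", "content": "Briefly introduce yourself."}],
    temperature=1.1,   # recommended sampling settings (see note below)
    top_p=0.95,
)
print(response.choices[0].message.content)
```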

### 【Dependencies / Installation】
As of **2025-08-21**, create a fresh Python environment and run:

```bash
VLLM_USE_PRECOMPILED=1 pip install git+https://github.com/FoolPlayer/vllm.git@seed-oss
pip install git+https://github.com/Fazziekey/transformers.git@seed-oss
```

### 【Logs】
```
2025-08-21
1. Initial commit
```

### 【Model Files】
| File Size | Last Updated |
|-----------|--------------|
| `20GB` | `2025-08-21` |

### 【Model Download】
```python
from huggingface_hub import snapshot_download
snapshot_download('QuantTrio/Seed-OSS-36B-Instruct-AWQ', cache_dir="your_local_path")
```
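
The snippet above places the files in the Hugging Face cache. If you instead want a plain local folder, for example so that the `--chat-template ./Seed-OSS-36B-Instruct-AWQ/chat_template.jinja` path in the startup command resolves, `snapshot_download` also accepts a `local_dir` argument. A small sketch; the target path is only an illustration:

```python
from huggingface_hub import snapshot_download

# Materialize the repo as regular files instead of the cache layout.
snapshot_download(
    "QuantTrio/Seed-OSS-36B-Instruct-AWQ",
    local_dir="./Seed-OSS-36B-Instruct-AWQ",
)
```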

### 【Overview】
## Introduction
<div align="center">
  👋 Hi, everyone!
  <br>
  We are <b>ByteDance Seed Team.</b>
</div>

<p align="center">
  You can get to know us better through the following channels👇
  <br>
  <a href="https://seed.bytedance.com/">
    <img src="https://img.shields.io/badge/Website-%231e37ff?style=for-the-badge&logo=bytedance&logoColor=white"></a>
</p>

![seed logo](https://github.com/user-attachments/assets/c42e675e-497c-4508-8bb9-093ad4d1f216)


# Seed-OSS Open-Source Models
<p align="center">
  <a href="https://github.com/ByteDance-Seed/seed-oss">
    <img src="https://img.shields.io/badge/Seed-Project Page-yellow"></a>
  <a href="https://github.com/ByteDance-Seed/seed-oss">
    <img src="https://img.shields.io/badge/Seed-Tech Report Coming Soon-red"></a>
  <a href="https://huggingface.co/ByteDance-Seed">
    <img src="https://img.shields.io/badge/Seed-Hugging Face-orange"></a>
  <br>
  <a href="./LICENSE">
    <img src="https://img.shields.io/badge/License-Apache2.0-blue"></a>
</p>

> [!NOTE]
> This model card is dedicated to the `Seed-OSS-36B-Instruct` model.

## News
- [2025/08/20] 🔥 We release `Seed-OSS-36B-Base` (both with and without synthetic data versions) and `Seed-OSS-36B-Instruct`.

## Introduction
Seed-OSS is a series of open-source large language models developed by ByteDance's Seed Team, designed for powerful long-context, reasoning, agent and general capabilities, and versatile developer-friendly features. Although trained with only 12T tokens, Seed-OSS achieves excellent performance on several popular open benchmarks.

We release this series of models to the open-source community under the Apache-2.0 license.

> [!NOTE]
> Seed-OSS is primarily optimized for international (i18n) use cases.

### Key Features
- **Flexible Control of Thinking Budget**: Users can flexibly adjust the reasoning length as needed. Dynamically controlling the reasoning length improves inference efficiency in practical application scenarios.
- **Enhanced Reasoning Capability**: Specifically optimized for reasoning tasks while maintaining balanced and excellent general capabilities.
- **Agentic Intelligence**: Performs exceptionally well in agentic tasks such as tool use and issue resolution.
- **Research-Friendly**: Since the inclusion of synthetic instruction data in pre-training may affect post-training research, we release pre-trained models both with and without instruction data, giving the research community more diverse options.
- **Native Long Context**: Natively trained with a context length of up to 512K tokens.

### Model Summary

Seed-OSS adopts the popular causal language model architecture with RoPE, GQA attention, RMSNorm and SwiGLU activation.

<div align="center">

| | Seed-OSS-36B |
|:---:|:---:|
| **Parameters** | 36B |
| **Attention** | GQA |
| **Activation Function** | SwiGLU |
| **Number of Layers** | 64 |
| **Number of QKV Heads** | 80 / 8 / 8 |
| **Head Size** | 128 |
| **Hidden Size** | 5120 |
| **Vocabulary Size** | 155K |
| **Context Length** | 512K |
| **RoPE Base Frequency** | 1e7 |

</div>

## Evaluation Results

### Seed-OSS-36B-Base

Incorporating synthetic instruction data into pretraining leads to improved performance on most benchmarks. We adopt the version augmented with synthetic instruction data (i.e., *w/ syn.*) as `Seed-OSS-36B-Base`. We also release `Seed-OSS-36B-Base-woSyn`, trained without such data (i.e., *w/o syn.*), offering the community a high-performance foundation model unaffected by synthetic instruction data.

<div align="center">

| Benchmark | [Seed1.6-Base](https://seed.bytedance.com/en/seed1_6) | Qwen3-30B-A3B-Base-2507* | Qwen2.5-32B-Base* | Seed-OSS-36B-Base (*w/ syn.*) | Seed-OSS-36B-Base-woSyn (*w/o syn.*) |
|:---|:---:|:---:|:---:|:---:|:---:|
| **Knowledge** | | | | | |
| MMLU-Pro | 70 | 59.8 | 58.5 (55.1) | **65.1** | 60.4 |
| MMLU | 88.8 | 82.7 | 84 (83.3) | **84.9** | 84.8 |
| TriviaQA | 91 | 76.2 | 76 | **82.1** | 81.9 |
| GPQA-D | 43.4 | **37** | 29.3 | 31.7 | 35.2 |
| SimpleQA | 17.1 | 7.2 | 6.1 | 5.8 | **7.4** |
| **Reasoning** | | | | | |
| BBH | 92.1 | 81.4 | 79.1 (84.5) | **87.7** | 87.2 |
| AGIEval-en | 78 | 66.4 | 65.6 | **70.7** | 70.1 |
| **Math** | | | | | |
| GSM8K | 93.1 | 87 | 87.5 (92.9) | **90.8** | 90.3 |
| MATH | 72.9 | 61.1 | 63.5 (57.7) | **81.7** | 61.3 |
| **Coding** | | | | | |
| MBPP | 83.6 | 78.8 | 77.8 (84.5) | **80.6** | 74.6 |
| HumanEval | 78 | 70.7 | 47.6 (58.5) | **76.8** | 75.6 |

</div>

<sup>
- <b>Bold</b> denotes open-source SOTA.
</sup><br/><sup>
- "*" indicates that the results in this column are presented in the format of "reproduced_results (reported_results_if_any)".
</sup>

### Seed-OSS-36B-Instruct

<div align="center">

| Benchmark | [Seed1.6-Thinking-0715](https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6-thinking) | OAI-OSS-20B* | Qwen3-30B-A3B-Thinking-2507* | Qwen3-32B* | Gemma3-27B | Seed-OSS-36B-Instruct |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|
| **Knowledge** | | | | | | |
| MMLU-Pro | 86.6 | 76.2 | <ins>81.9</ins> (80.9) | 81.8 | 67.5 | **82.7** |
| MMLU | 90.6 | 81.7 (85.3) | <ins>86.9</ins> | 86.2 | 76.9 | **87.4** |
| GPQA-D | 80.7 | **72.2** (71.5) | <ins>71.4</ins> (73.4) | 66.7 (68.4) | 42.4 | <ins>71.4</ins> |
| SuperGPQA | 63.4 | 50.1 | **57.3** (56.8) | 49.3 | - | <ins>55.7</ins> |
| SimpleQA | 23.7 | 6.7 | **23.6** | 8.6 | <ins>10</ins> | 9.7 |
| **Math** | | | | | | |
| AIME24 | 90.3 | **92.7** (92.1) | 87.7 | 82.7 (81.4) | - | <ins>91.7</ins> |
| AIME25 | 86 | **90.3** (91.7) | 81.3 (85) | 73.3 (72.9) | - | <ins>84.7</ins> |
| BeyondAIME | 60 | **69** | 56 | 29 | - | <ins>65</ins> |
| **Reasoning** | | | | | | |
| ArcAGI V2 | 50.3 | **41.7** | 37.8 | 14.4 | - | <ins>40.6</ins> |
| KORBench | 74.8 | **72.3** | 70.2 | 65.4 | - | <ins>70.6</ins> |
| **Coding** | | | | | | |
| LiveCodeBench v6 (02/2025-05/2025) | 66.8 | <ins>63.8</ins> | 60.3 (66) | 53.4 | - | **67.4** |
| HLE | 13.9 | **12.7** (10.9) | 8.7 | 6.9 | - | <ins>10.1</ins> |
| **Instruction Following** | | | | | | |
| IFEval | 86.3 | **92.8** | 88 (88.9) | 88.4 (85) | <ins>90.4</ins> | 85.8 |
| **Agent** | | | | | | |
| TAU1-Retail | 63 | (54.8) | <ins>58.7</ins> (67.8) | 40.9 | - | **70.4** |
| TAU1-Airline | 49 | (38) | **47** (48) | 38 | - | <ins>46</ins> |
| SWE-Bench Verified (OpenHands) | 41.8 | **(60.7)** | 31 | 23.4 | - | <ins>56</ins> |
| SWE-Bench Verified (AgentLess 4*10) | 48.4 | - | 33.5 | <ins>39.7</ins> | - | **47** |
| Multi-SWE-Bench | 17.7 | - | <ins>9.5</ins> | 7.7 | - | **17** |
| **Multilingualism** | | | | | | |
| MMMLU | 84.3 | 77.4 (75.7) | **79** | **79** (80.6) | - | <ins>78.4</ins> |
| **Long Context** | | | | | | |
| RULER (128K) | 94.5 | 78.7 | <ins>94.5</ins> | 77.5 | - | **94.6** |
| **Safety** | | | | | | |
| AIR-Bench | - | - | - | - | - | 75.6 |

</div>

<sup>
- <b>Bold</b> denotes open-source SOTA. <ins>Underlined</ins> indicates the second-best open-source result.
</sup><br/><sup>
- "*" indicates that the results in this column are presented in the format of "reproduced_results (reported_results_if_any)". Some results have been omitted due to evaluation-run failures.
</sup><br/><sup>
- The results of Gemma3-27B are sourced directly from its technical report.
</sup><br/><sup>
- Generation configs for Seed-OSS-36B-Instruct: temperature=1.1, top_p=0.95. Specifically, for TauBench, temperature=1, top_p=0.7.
</sup>

> [!NOTE]
> We recommend sampling with `temperature=1.1` and `top_p=0.95`.

### Thinking Budget

Users can flexibly specify the model's thinking budget. The figure below shows the performance curves across different tasks as the thinking budget varies. For simpler tasks (such as IFEval), the model's chain of thought (CoT) is shorter, and the score fluctuates as the thinking budget increases. For more challenging tasks (such as AIME and LiveCodeBench), the model's CoT is longer, and the score improves as the thinking budget increases.

![thinking_budget](./thinking_budget.png)

Here is an example with the thinking budget set to 512: during the reasoning process, the model periodically triggers self-reflection to estimate the consumed and remaining budget, and delivers the final response once the budget is exhausted or the reasoning concludes.
```
<seed:think>
Got it, let's try to solve this problem step by step. The problem says ... ...
<seed:cot_budget_reflect>I have used 129 tokens, and there are 383 tokens remaining for use.</seed:cot_budget_reflect>
Using the power rule, ... ...
<seed:cot_budget_reflect>I have used 258 tokens, and there are 254 tokens remaining for use.</seed:cot_budget_reflect>
Alternatively, remember that ... ...
<seed:cot_budget_reflect>I have used 393 tokens, and there are 119 tokens remaining for use.</seed:cot_budget_reflect>
Because if ... ...
<seed:cot_budget_reflect>I have exhausted my token budget, and now I will start answering the question.</seed:cot_budget_reflect>
</seed:think>
To solve the problem, we start by using the properties of logarithms to simplify the given equations: (full answer omitted).
```

If no thinking budget is set (default mode), Seed-OSS will think with unlimited length. If a thinking budget is specified, users are advised to choose values that are integer multiples of 512 (e.g., 512, 1K, 2K, 4K, 8K, or 16K), as the model has been extensively trained on these intervals. Models are instructed to output a direct response when the thinking budget is 0, and we recommend setting any budget below 512 to this value.
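
As a purely illustrative helper (not part of the official tooling), the guidance above could be encoded as a small function that snaps an arbitrary requested budget onto the recommended grid:

```python
def snap_thinking_budget(requested: int) -> int:
    """Snap a requested thinking budget onto the values recommended above.

    Budgets below 512 are mapped to 0 (direct response); larger values are
    rounded to the nearest multiple of 512, the intervals the model was
    extensively trained on.
    """
    if requested < 512:
        return 0
    return round(requested / 512) * 512


# Examples: 300 -> 0, 1000 -> 1024, 4096 -> 4096
```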

## Quick Start
```shell
pip3 install -r requirements.txt
pip install git+ssh://git@github.com/Fazziekey/transformers.git@seed-oss
```

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = "ByteDance-Seed/Seed-OSS-36B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto")  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "How to make pasta?"},
]
tokenized_chat = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
    thinking_budget=512  # control the thinking budget
)

outputs = model.generate(tokenized_chat.to(model.device), max_new_tokens=2048)

output_text = tokenizer.decode(outputs[0])
```
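
The decoded `output_text` contains the reasoning wrapped in `<seed:think>...</seed:think>` followed by the final answer, as in the thinking-budget example above. A small illustrative helper for separating the two (not part of the official repo):

```python
import re


def split_think_and_answer(output_text: str) -> tuple[str, str]:
    """Split generated text into (reasoning, final answer).

    Assumes the reasoning is wrapped in <seed:think>...</seed:think> tags,
    as shown in the thinking-budget example above.
    """
    match = re.search(r"<seed:think>(.*?)</seed:think>", output_text, re.DOTALL)
    if match is None:
        return "", output_text.strip()  # no thinking block emitted
    reasoning = match.group(1).strip()
    answer = output_text[match.end():].strip()
    return reasoning, answer
```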

## Inference

### Download Model

Download the Seed-OSS checkpoint to `./Seed-OSS-36B-Instruct`.

### Transformers
The `generate.py` script provides a simple interface for model inference with configurable options.

#### Basic Usage
```shell
cd inference
python3 generate.py --model_path /path/to/model
```

#### Key Parameters
| Parameter | Description |
|-----------|-------------|
| `--model_path` | Path to the pretrained model directory (required) |
| `--prompts` | Input prompts (default: sample cooking/code questions) |
| `--max_new_tokens` | Maximum tokens to generate (default: 4096) |
| `--attn_implementation` | Attention mechanism: `flash_attention_2` (default) or `eager` |
| `--load_in_4bit/8bit` | Enable 4-bit/8-bit quantization (reduces memory usage) |
| `--thinking_budget` | Thinking budget in tokens (default: -1 for unlimited budget) |

#### Quantization Examples
```shell
# 8-bit quantization
python3 generate.py --model_path /path/to/model --load_in_8bit True

# 4-bit quantization
python3 generate.py --model_path /path/to/model --load_in_4bit True
```

#### Custom Prompts
```shell
python3 generate.py --model_path /path/to/model --prompts "['What is machine learning?', 'Explain quantum computing']"
```
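
If you call `transformers` directly instead of going through `generate.py`, the 4-bit option is typically expressed with a `BitsAndBytesConfig`. The exact flags `generate.py` passes internally are not documented in this card, so treat the following as an approximate sketch of the equivalent:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name_or_path = "ByteDance-Seed/Seed-OSS-36B-Instruct"

# 4-bit NF4 quantization via bitsandbytes; roughly what --load_in_4bit enables.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    quantization_config=quant_config,
    device_map="auto",
)
```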

### vLLM
Use vLLM 0.10.0 or higher for inference.

- First, install the vLLM build with Seed-OSS support:
```shell
VLLM_USE_PRECOMPILED=1 VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL=1 pip install git+ssh://git@github.com/FoolPlayer/vllm.git@seed-oss
```

- Start the vLLM API server:
```shell
python3 -m vllm.entrypoints.openai.api_server \
    --host localhost \
    --port 4321 \
    --enable-auto-tool-choice \
    --tool-call-parser seed_oss \
    --trust-remote-code \
    --model ./Seed-OSS-36B-Instruct \
    --chat-template ./Seed-OSS-36B-Instruct/chat_template.jinja \
    --tensor-parallel-size 8 \
    --dtype bfloat16 \
    --served-model-name seed_oss
```

- Test with OpenAI client:

Chat

```shell
python3 inference/vllm_chat.py
```
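
The repository scripts themselves are not reproduced here. As a rough sketch of what such a chat test looks like against the server started above (assuming the `openai` Python package; host, port, and served model name are taken from the startup command):

```python
from openai import OpenAI

# The server above listens on localhost:4321 and serves the model as "seed_oss".
client = OpenAI(base_url="http://localhost:4321/v1", api_key="EMPTY")

resp = client.chat.completions.create(
    model="seed_oss",
    messages=[{"role": "user", "content": "How to make pasta?"}],
    temperature=1.1,
    top_p=0.95,
)
print(resp.choices[0].message.content)
```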

Tool Call
```shell
python3 inference/vllm_tool_call.py
```
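
Similarly, because the server is launched with `--enable-auto-tool-choice --tool-call-parser seed_oss`, tool calls follow the standard OpenAI `tools` schema. A minimal sketch; the weather tool is purely illustrative and not part of the repository:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:4321/v1", api_key="EMPTY")

# Illustrative tool definition (not part of the repo scripts).
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

resp = client.chat.completions.create(
    model="seed_oss",
    messages=[{"role": "user", "content": "What's the weather in Beijing?"}],
    tools=tools,
    tool_choice="auto",
)
print(resp.choices[0].message.tool_calls)
```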


## Model Card
See [MODEL_CARD](./MODEL_CARD.md).

## License
This project is licensed under Apache-2.0. See the [LICENSE](./LICENSE) file for details.

## Citation

```bibtex
@misc{seed2025seed-oss,
  author={ByteDance Seed Team},
  title={Seed-OSS Open-Source Models},
  year={2025},
  howpublished={\url{https://github.com/ByteDance-Seed/seed-oss}}
}
```

## About [ByteDance Seed Team](https://seed.bytedance.com/)

Founded in 2023, ByteDance Seed Team is dedicated to crafting the industry's most advanced AI foundation models. The team aspires to become a world-class research team and make significant contributions to the advancement of science and society.
.mdl
ADDED
Binary file (55 Bytes)

.msc
ADDED
Binary file (1.44 kB)
.mv
ADDED
@@ -0,0 +1 @@
Revision:master,CreatedAt:1755752740
README.md
ADDED
@@ -0,0 +1,682 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: apache-2.0
|
3 |
+
library_name: transformers
|
4 |
+
pipeline_tag: text-generation
|
5 |
+
tags:
|
6 |
+
- vLLM
|
7 |
+
- AWQ
|
8 |
+
language:
|
9 |
+
- zh
|
10 |
+
- en
|
11 |
+
base_model:
|
12 |
+
- ByteDance-Seed/Seed-OSS-36B-Instruct
|
13 |
+
base_model_relation: quantized
|
14 |
+
|
15 |
+
---
|
16 |
+
# Seed-OSS-36B-Instruct-AWQ
|
17 |
+
Base model: [ByteDance-Seed/Seed-OSS-36B-Instruct](https://huggingface.co/ByteDance-Seed/Seed-OSS-36B-Instruct)
|
18 |
+
|
19 |
+
### 【vLLM Single Node with 2 GPUs — Startup Command】
|
20 |
+
```
|
21 |
+
CONTEXT_LENGTH=32768
|
22 |
+
|
23 |
+
vllm serve \
|
24 |
+
QuantTrio/Seed-OSS-36B-Instruct-AWQ \
|
25 |
+
--served-model-name Seed-OSS-36B-Instruct-AWQ \
|
26 |
+
--enable-auto-tool-choice \
|
27 |
+
--tool-call-parser seed_oss \
|
28 |
+
--chat-template ./Seed-OSS-36B-Instruct-AWQ/chat_template.jinja \
|
29 |
+
--swap-space 4 \
|
30 |
+
--max-num-seqs 512 \
|
31 |
+
--max-model-len $CONTEXT_LENGTH \
|
32 |
+
--max-seq-len-to-capture $CONTEXT_LENGTH \
|
33 |
+
--gpu-memory-utilization 0.9 \
|
34 |
+
--tensor-parallel-size 2 \
|
35 |
+
--trust-remote-code \
|
36 |
+
--disable-log-requests \
|
37 |
+
--host 0.0.0.0 \
|
38 |
+
--port 8000
|
39 |
+
```
|
40 |
+
|
41 |
+
### 【Dependencies / Installation】
|
42 |
+
As of **2025-08-21**, create a fresh Python environment and run:
|
43 |
+
|
44 |
+
```bash
|
45 |
+
|
46 |
+
VLLM_USE_PRECOMPILED=1 pip install git+https://github.com/FoolPlayer/vllm.git@seed-oss
|
47 |
+
pip install git+https://github.com/Fazziekey/transformers.git@seed-oss
|
48 |
+
```
|
49 |
+
|
50 |
+
### 【Logs】
|
51 |
+
```
|
52 |
+
2025-08-21
|
53 |
+
1. Initial commit
|
54 |
+
```
|
55 |
+
|
56 |
+
### 【Model Files】
|
57 |
+
| File Size | Last Updated |
|
58 |
+
|-----------|--------------|
|
59 |
+
| `20GB` | `2025-08-21` |
|
60 |
+
|
61 |
+
### 【Model Download】
|
62 |
+
```python
|
63 |
+
from huggingface_hub import snapshot_download
|
64 |
+
snapshot_download('QuantTrio/Seed-OSS-36B-Instruct-AWQ', cache_dir="your_local_path")
|
65 |
+
```
|
66 |
+
|
67 |
+
### 【Overview】
|
68 |
+
## Introduction
|
69 |
+
<div align="center">
|
70 |
+
👋 Hi, everyone!
|
71 |
+
<br>
|
72 |
+
We are <b>ByteDance Seed Team.</b>
|
73 |
+
</div>
|
74 |
+
|
75 |
+
<p align="center">
|
76 |
+
You can get to know us better through the following channels👇
|
77 |
+
<br>
|
78 |
+
<a href="https://seed.bytedance.com/">
|
79 |
+
<img src="https://img.shields.io/badge/Website-%231e37ff?style=for-the-badge&logo=bytedance&logoColor=white"></a>
|
80 |
+
</p>
|
81 |
+
|
82 |
+

|
83 |
+
|
84 |
+
|
85 |
+
# Seed-OSS Open-Source Models
|
86 |
+
<p align="center">
|
87 |
+
<a href="https://github.com/ByteDance-Seed/seed-oss">
|
88 |
+
<img src="https://img.shields.io/badge/Seed-Project Page-yellow"></a>
|
89 |
+
<a href="https://github.com/ByteDance-Seed/seed-oss">
|
90 |
+
<img src="https://img.shields.io/badge/Seed-Tech Report Coming Soon-red"></a>
|
91 |
+
<a href="https://huggingface.co/ByteDance-Seed">
|
92 |
+
<img src="https://img.shields.io/badge/Seed-Hugging Face-orange"></a>
|
93 |
+
<br>
|
94 |
+
<a href="./LICENSE">
|
95 |
+
<img src="https://img.shields.io/badge/License-Apache2.0-blue"></a>
|
96 |
+
</p>
|
97 |
+
|
98 |
+
> [!NOTE]
|
99 |
+
> This model card is dedicated to the `Seed-OSS-36B-Instruct` model.
|
100 |
+
|
101 |
+
## News
|
102 |
+
- [2025/08/20]🔥We release `Seed-OSS-36B-Base` (both with and without synthetic data versions) and `Seed-OSS-36B-Instruct`.
|
103 |
+
|
104 |
+
## Introduction
|
105 |
+
Seed-OSS is a series of open-source large language models developed by ByteDance's Seed Team, designed for powerful long-context, reasoning, agent and general capabilities, and versatile developer-friendly features. Although trained with only 12T tokens, Seed-OSS achieves excellent performance on several popular open benchmarks.
|
106 |
+
|
107 |
+
We release this series of models to the open-source community under the Apache-2.0 license.
|
108 |
+
|
109 |
+
> [!NOTE]
|
110 |
+
> Seed-OSS is primarily optimized for international (i18n) use cases.
|
111 |
+
|
112 |
+
### Key Features
|
113 |
+
- **Flexible Control of Thinking Budget**: Allowing users to flexibly adjust the reasoning length as needed. This capability of dynamically controlling the reasoning length enhances inference efficiency in practical application scenarios.
|
114 |
+
- **Enhanced Reasoning Capability**: Specifically optimized for reasoning tasks while maintaining balanced and excellent general capabilities.
|
115 |
+
- **Agentic Intelligence**: Performs exceptionally well in agentic tasks such as tool-using and issue resolving.
|
116 |
+
- **Research-Friendly**: Given that the inclusion of synthetic instruction data in pre-training may affect the post-training research, we released pre-trained models both with and without instruction data, providing the research community with more diverse options.
|
117 |
+
- **Native Long Context**: Trained with up-to-512K long context natively.
|
118 |
+
|
119 |
+
### Model Summary
|
120 |
+
|
121 |
+
Seed-OSS adopts the popular causal language model architecture with RoPE, GQA attention, RMSNorm and SwiGLU activation.
|
122 |
+
|
123 |
+
<div align="center">
|
124 |
+
|
125 |
+
| | |
|
126 |
+
|:---:|:---:|
|
127 |
+
| | **Seed-OSS-36B** |
|
128 |
+
| **Parameters** | 36B |
|
129 |
+
| **Attention** | GQA |
|
130 |
+
| **Activation Function** | SwiGLU |
|
131 |
+
| **Number of Layers** | 64 |
|
132 |
+
| **Number of QKV Heads** | 80 / 8 / 8 |
|
133 |
+
| **Head Size** | 128 |
|
134 |
+
| **Hidden Size** | 5120 |
|
135 |
+
| **Vocabulary Size** | 155K |
|
136 |
+
| **Context Length** | 512K |
|
137 |
+
| **RoPE Base Frequency** | 1e7 |
|
138 |
+
|
139 |
+
</div>
|
140 |
+
|
141 |
+
|
142 |
+
## Evaluation Results
|
143 |
+
|
144 |
+
### Seed-OSS-36B-Base
|
145 |
+
|
146 |
+
Incorporating synthetic instruction data into pretraining leads to improved performance on most benchmarks. We adopt the version augmented with synthetic instruction data (i.e., *w/ syn.*) as `Seed-OSS-36B-Base`. We also release `Seed-OSS-36B-Base-woSyn` trained without such data (i.e., *w/o syn.*), offering the community a high-performance foundation model unaffected by synthetic instruction data.
|
147 |
+
|
148 |
+
<div align="center">
|
149 |
+
<table>
|
150 |
+
<thead>
|
151 |
+
<tr>
|
152 |
+
<th align="center">Benchmark</th>
|
153 |
+
<th align="center"><sup><a href="https://seed.bytedance.com/en/seed1_6">Seed1.6-Base</a></sup></th>
|
154 |
+
<th align="center"><sup>Qwen3-30B-A3B-Base-2507*</sup></th>
|
155 |
+
<th align="center"><sup>Qwen2.5-32B-Base*</sup></th>
|
156 |
+
<th align="center"><sup>Seed-OSS-36B-Base<br>(<i>w/ syn.</i>)</sup></th>
|
157 |
+
<th align="center"><sup>Seed-OSS-36B-Base-woSyn<br>(<i>w/o syn.</i>)</sup></th>
|
158 |
+
</tr>
|
159 |
+
</thead>
|
160 |
+
<tbody>
|
161 |
+
<tr>
|
162 |
+
<td align="center" colspan=6><strong>Knowledge</strong></td>
|
163 |
+
</tr>
|
164 |
+
<tr>
|
165 |
+
<td align="center">MMLU-Pro</td>
|
166 |
+
<td align="center">70</td>
|
167 |
+
<td align="center">59.8</td>
|
168 |
+
<td align="center">58.5 (55.1)</td>
|
169 |
+
<td align="center"><b>65.1</b></td>
|
170 |
+
<td align="center">60.4</td>
|
171 |
+
</tr>
|
172 |
+
<tr>
|
173 |
+
<td align="center">MMLU</td>
|
174 |
+
<td align="center">88.8</td>
|
175 |
+
<td align="center">82.7</td>
|
176 |
+
<td align="center">84 (83.3)</td>
|
177 |
+
<td align="center"><b>84.9</b></td>
|
178 |
+
<td align="center">84.8</td>
|
179 |
+
</tr>
|
180 |
+
<tr>
|
181 |
+
<td align="center">TriviaQA</td>
|
182 |
+
<td align="center">91</td>
|
183 |
+
<td align="center">76.2</td>
|
184 |
+
<td align="center">76</td>
|
185 |
+
<td align="center"><b>82.1</b></td>
|
186 |
+
<td align="center">81.9</td>
|
187 |
+
</tr>
|
188 |
+
<tr>
|
189 |
+
<td align="center">GPQA-D</td>
|
190 |
+
<td align="center">43.4</td>
|
191 |
+
<td align="center"><b>37</b></td>
|
192 |
+
<td align="center">29.3</td>
|
193 |
+
<td align="center">31.7</td>
|
194 |
+
<td align="center">35.2</td>
|
195 |
+
</tr>
|
196 |
+
<tr>
|
197 |
+
<td align="center">SimpleQA</td>
|
198 |
+
<td align="center">17.1</td>
|
199 |
+
<td align="center">7.2</td>
|
200 |
+
<td align="center">6.1</td>
|
201 |
+
<td align="center">5.8</td>
|
202 |
+
<td align="center"><b>7.4</b></td>
|
203 |
+
</tr>
|
204 |
+
|
205 |
+
<tr>
|
206 |
+
<td align="center" colspan=6><strong>Reasoning</strong></td>
|
207 |
+
</tr>
|
208 |
+
<tr>
|
209 |
+
<td align="center">BBH</td>
|
210 |
+
<td align="center">92.1</td>
|
211 |
+
<td align="center">81.4</td>
|
212 |
+
<td align="center">79.1 (84.5)</td>
|
213 |
+
<td align="center"><b>87.7</b></td>
|
214 |
+
<td align="center">87.2</td>
|
215 |
+
</tr>
|
216 |
+
<tr>
|
217 |
+
<td align="center">AGIEval-en</td>
|
218 |
+
<td align="center">78</td>
|
219 |
+
<td align="center">66.4</td>
|
220 |
+
<td align="center">65.6</td>
|
221 |
+
<td align="center"><b>70.7</b></td>
|
222 |
+
<td align="center">70.1</td>
|
223 |
+
</tr>
|
224 |
+
|
225 |
+
<tr>
|
226 |
+
<td align="center" colspan=6><strong>Math</strong></td>
|
227 |
+
</tr>
|
228 |
+
<tr>
|
229 |
+
<td align="center">GSM8K</td>
|
230 |
+
<td align="center">93.1</td>
|
231 |
+
<td align="center">87</td>
|
232 |
+
<td align="center">87.5 (92.9)</td>
|
233 |
+
<td align="center"><b>90.8</b></td>
|
234 |
+
<td align="center">90.3</td>
|
235 |
+
</tr>
|
236 |
+
<tr>
|
237 |
+
<td align="center">MATH</td>
|
238 |
+
<td align="center">72.9</td>
|
239 |
+
<td align="center">61.1</td>
|
240 |
+
<td align="center">63.5 (57.7)</td>
|
241 |
+
<td align="center"><b>81.7</b></td>
|
242 |
+
<td align="center">61.3</td>
|
243 |
+
</tr>
|
244 |
+
|
245 |
+
<tr>
|
246 |
+
<td align="center" colspan=6><strong>Coding</strong></td>
|
247 |
+
</tr>
|
248 |
+
<tr>
|
249 |
+
<td align="center">MBPP</td>
|
250 |
+
<td align="center">83.6</td>
|
251 |
+
<td align="center">78.8</td>
|
252 |
+
<td align="center">77.8 (84.5)</td>
|
253 |
+
<td align="center"><b>80.6</b></td>
|
254 |
+
<td align="center">74.6</td>
|
255 |
+
</tr>
|
256 |
+
<tr>
|
257 |
+
<td align="center">HumanEval</td>
|
258 |
+
<td align="center">78</td>
|
259 |
+
<td align="center">70.7</td>
|
260 |
+
<td align="center">47.6 (58.5)</td>
|
261 |
+
<td align="center"><b>76.8</b></td>
|
262 |
+
<td align="center">75.6</td>
|
263 |
+
</tr>
|
264 |
+
</tbody>
|
265 |
+
</table>
|
266 |
+
</div>
|
267 |
+
|
268 |
+
<sup>
|
269 |
+
- <b>Bold</b> denotes open-source SOTA.
|
270 |
+
</sup><br/><sup>
|
271 |
+
- "*" indicates that the results in this column are presented in the format of "reproduced_results (reported_results_if_any)".
|
272 |
+
</sup>
|
273 |
+
|
274 |
+
### Seed-OSS-36B-Instruct
|
275 |
+
|
276 |
+
<div align="center">
|
277 |
+
<table>
|
278 |
+
<thead>
|
279 |
+
<tr>
|
280 |
+
<th align="center">Benchmark</th>
|
281 |
+
<th align="center"><sup><a href="https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6-thinking">Seed1.6-Thinking-0715</a></sup></th>
|
282 |
+
<th align="center"><sup>OAI-OSS-20B*</sup></th>
|
283 |
+
<th align="center"><sup>Qwen3-30B-A3B-Thinking-2507*</sup></th>
|
284 |
+
<th align="center"><sup>Qwen3-32B*</sup></th>
|
285 |
+
<th align="center"><sup>Gemma3-27B</sup></th>
|
286 |
+
<th align="center"><sup>Seed-OSS-36B-Instruct</sup></th>
|
287 |
+
</tr>
|
288 |
+
</thead>
|
289 |
+
<tbody>
|
290 |
+
<tr>
|
291 |
+
<td align="center" colspan=7><strong>Knowledge</strong></td>
|
292 |
+
</tr>
|
293 |
+
<tr>
|
294 |
+
<td align="center">MMLU-Pro</td>
|
295 |
+
<td align="center">86.6</td>
|
296 |
+
<td align="center">76.2</td>
|
297 |
+
<td align="center"><ins>81.9</ins> (80.9)</td>
|
298 |
+
<td align="center">81.8</td>
|
299 |
+
<td align="center">67.5</td>
|
300 |
+
<td align="center"><b>82.7</b></td>
|
301 |
+
</tr>
|
302 |
+
<tr>
|
303 |
+
<td align="center">MMLU</td>
|
304 |
+
<td align="center">90.6</td>
|
305 |
+
<td align="center">81.7 (85.3)</td>
|
306 |
+
<td align="center"><ins>86.9</ins></td>
|
307 |
+
<td align="center">86.2</td>
|
308 |
+
<td align="center">76.9</td>
|
309 |
+
<td align="center"><b>87.4</b></td>
|
310 |
+
</tr>
|
311 |
+
<tr>
|
312 |
+
<td align="center">GPQA-D</td>
|
313 |
+
<td align="center">80.7</td>
|
314 |
+
<td align="center"><b>72.2</b> (71.5)</td>
|
315 |
+
<td align="center"><ins>71.4</ins> (73.4)</td>
|
316 |
+
<td align="center">66.7 (68.4)</td>
|
317 |
+
<td align="center">42.4</td>
|
318 |
+
<td align="center"><ins>71.4</ins></td>
|
319 |
+
</tr>
|
320 |
+
<tr>
|
321 |
+
<td align="center">SuperGPQA</td>
|
322 |
+
<td align="center">63.4</td>
|
323 |
+
<td align="center">50.1</td>
|
324 |
+
<td align="center"><b>57.3</b> (56.8)</td>
|
325 |
+
<td align="center">49.3</td>
|
326 |
+
<td align="center">-</td>
|
327 |
+
<td align="center"><ins>55.7</ins></td>
|
328 |
+
</tr>
|
329 |
+
<tr>
|
330 |
+
<td align="center">SimpleQA</td>
|
331 |
+
<td align="center">23.7</td>
|
332 |
+
<td align="center">6.7</td>
|
333 |
+
<td align="center"><b>23.6</b></td>
|
334 |
+
<td align="center">8.6</td>
|
335 |
+
<td align="center"><ins>10</ins></td>
|
336 |
+
<td align="center">9.7</td>
|
337 |
+
</tr>
|
338 |
+
|
339 |
+
<tr>
|
340 |
+
<td align="center" colspan=7><strong>Math</strong></td>
|
341 |
+
</tr>
|
342 |
+
<tr>
|
343 |
+
<td align="center">AIME24</td>
|
344 |
+
<td align="center">90.3</td>
|
345 |
+
<td align="center"><b>92.7</b> (92.1)</td>
|
346 |
+
<td align="center">87.7</td>
|
347 |
+
<td align="center">82.7 (81.4)</td>
|
348 |
+
<td align="center">-</td>
|
349 |
+
<td align="center"><ins>91.7</ins></td>
|
350 |
+
</tr>
|
351 |
+
<tr>
|
352 |
+
<td align="center">AIME25</td>
|
353 |
+
<td align="center">86</td>
|
354 |
+
<td align="center"><b>90.3</b> (91.7)</td>
|
355 |
+
<td align="center">81.3 (85)</td>
|
356 |
+
<td align="center">73.3 (72.9)</td>
|
357 |
+
<td align="center">-</td>
|
358 |
+
<td align="center"><ins>84.7</ins></td>
|
359 |
+
</tr>
|
360 |
+
<tr>
|
361 |
+
<td align="center">BeyondAIME</td>
|
362 |
+
<td align="center">60</td>
|
363 |
+
<td align="center"><b>69</b></td>
|
364 |
+
<td align="center">56</td>
|
365 |
+
<td align="center">29</td>
|
366 |
+
<td align="center">-</td>
|
367 |
+
<td align="center"><ins>65</ins></td>
|
368 |
+
</tr>
|
369 |
+
|
370 |
+
<tr>
|
371 |
+
<td align="center" colspan=7><strong>Reasoning</strong></td>
|
372 |
+
</tr>
|
373 |
+
<tr>
|
374 |
+
<td align="center">ArcAGI V2</td>
|
375 |
+
<td align="center">50.3</td>
|
376 |
+
<td align="center"><b>41.7</b></td>
|
377 |
+
<td align="center">37.8</td>
|
378 |
+
<td align="center">14.4</td>
|
379 |
+
<td align="center">-</td>
|
380 |
+
<td align="center"><ins>40.6</ins></td>
|
381 |
+
</tr>
|
382 |
+
<tr>
|
383 |
+
<td align="center">KORBench</td>
|
384 |
+
<td align="center">74.8</td>
|
385 |
+
<td align="center"><b>72.3</b></td>
|
386 |
+
<td align="center">70.2</td>
|
387 |
+
<td align="center">65.4</td>
|
388 |
+
<td align="center">-</td>
|
389 |
+
<td align="center"><ins>70.6</ins></td>
|
390 |
+
</tr>
|
391 |
+
|
392 |
+
<tr>
|
393 |
+
<td align="center" colspan=7><strong>Coding</strong></td>
|
394 |
+
</tr>
|
395 |
+
<tr>
|
396 |
+
<td align="center">LiveCodeBench v6<br/><sup>(02/2025-05/2025)</sup></td>
|
397 |
+
<td align="center">66.8</td>
|
398 |
+
<td align="center"><ins>63.8</ins></td>
|
399 |
+
<td align="center">60.3 (66)</td>
|
400 |
+
<td align="center">53.4</td>
|
401 |
+
<td align="center">-</td>
|
402 |
+
<td align="center"><b>67.4</b></td>
|
403 |
+
</tr>
|
404 |
+
<tr>
|
405 |
+
<td align="center">HLE</td>
|
406 |
+
<td align="center">13.9</td>
|
407 |
+
<td align="center"><b>12.7</b> (10.9)</td>
|
408 |
+
<td align="center">8.7</td>
|
409 |
+
<td align="center">6.9</td>
|
410 |
+
<td align="center">-</td>
|
411 |
+
<td align="center"><ins>10.1</ins></td>
|
412 |
+
</tr>
|
413 |
+
|
414 |
+
<tr>
|
415 |
+
<td align="center" colspan=7><strong>Instruction Following</strong></td>
|
416 |
+
</tr>
|
417 |
+
<tr>
|
418 |
+
<td align="center">IFEval</td>
|
419 |
+
<td align="center">86.3</td>
|
420 |
+
<td align="center"><b>92.8</b></td>
|
421 |
+
<td align="center">88 (88.9)</td>
|
422 |
+
<td align="center">88.4 (85)</td>
|
423 |
+
<td align="center"><ins>90.4</ins></td>
|
424 |
+
<td align="center">85.8</td>
|
425 |
+
</tr>
|
426 |
+
|
427 |
+
|
428 |
+
<tr>
|
429 |
+
<td align="center" colspan=7><strong>Agent</strong></td>
|
430 |
+
</tr>
|
431 |
+
<tr>
|
432 |
+
<td align="center">TAU1-Retail</td>
|
433 |
+
<td align="center">63</td>
|
434 |
+
<td align="center">(54.8)</td>
|
435 |
+
<td align="center"><ins>58.7</ins> (67.8)</td>
|
436 |
+
<td align="center">40.9</td>
|
437 |
+
<td align="center">-</td>
|
438 |
+
<td align="center"><b>70.4</b></td>
|
439 |
+
</tr>
|
440 |
+
<tr>
|
441 |
+
<td align="center">TAU1-Airline</td>
|
442 |
+
<td align="center">49</td>
|
443 |
+
<td align="center">(38)</td>
|
444 |
+
<td align="center"><b>47</b> (48)</td>
|
445 |
+
<td align="center">38</td>
|
446 |
+
<td align="center">-</td>
|
447 |
+
<td align="center"><ins>46</ins></td>
|
448 |
+
</tr>
|
449 |
+
<tr>
|
450 |
+
<td align="center">SWE-Bench Verified<br/><sup>(OpenHands)</sup></td>
|
451 |
+
<td align="center">41.8</td>
|
452 |
+
<td align="center"><b>(60.7)</b></td>
|
453 |
+
<td align="center">31</td>
|
454 |
+
<td align="center">23.4</td>
|
455 |
+
<td align="center">-</td>
|
456 |
+
<td align="center"><ins>56</ins></td>
|
457 |
+
</tr>
|
458 |
+
<tr>
|
459 |
+
<td align="center">SWE-Bench Verified<br/><sup>(AgentLess 4*10)</sup></td>
|
460 |
+
<td align="center">48.4</td>
|
461 |
+
<td align="center">-</td>
|
462 |
+
<td align="center">33.5</td>
|
463 |
+
<td align="center"><ins>39.7</ins></td>
|
464 |
+
<td align="center">-</td>
|
465 |
+
<td align="center"><b>47</b></td>
|
466 |
+
</tr>
|
467 |
+
<tr>
|
468 |
+
<td align="center">Multi-SWE-Bench</td>
|
469 |
+
<td align="center">17.7</td>
|
470 |
+
<td align="center">-</td>
|
471 |
+
<td align="center"><ins>9.5</ins></td>
|
472 |
+
<td align="center">7.7</td>
|
473 |
+
<td align="center">-</td>
|
474 |
+
<td align="center"><b>17</b></td>
|
475 |
+
</tr>
|
476 |
+
|
477 |
+
<tr>
|
478 |
+
<td align="center" colspan=7><strong>Multilingualism</strong></td>
|
479 |
+
</tr>
|
480 |
+
<tr>
|
481 |
+
<td align="center">MMMLU</td>
|
482 |
+
<td align="center">84.3</td>
|
483 |
+
<td align="center">77.4 (75.7)</td>
|
484 |
+
<td align="center"><b>79</b></td>
|
485 |
+
<td align="center"><b>79</b> (80.6)</td>
|
486 |
+
<td align="center">-</td>
|
487 |
+
<td align="center"><ins>78.4</ins></td>
|
488 |
+
</tr>
|
489 |
+
|
490 |
+
<tr>
|
491 |
+
<td align="center" colspan=7><strong>Long Context</strong></td>
|
492 |
+
</tr>
|
493 |
+
<tr>
|
494 |
+
<td align="center">RULER<br/><sup>(128K)</sup></td>
|
495 |
+
<td align="center">94.5</td>
|
496 |
+
<td align="center">78.7</td>
|
497 |
+
<td align="center"><ins>94.5</ins></td>
|
498 |
+
<td align="center">77.5</td>
|
499 |
+
<td align="center">-</td>
|
500 |
+
<td align="center"><b>94.6</b></td>
|
501 |
+
</tr>
|
502 |
+
|
503 |
+
<tr>
|
504 |
+
<td align="center" colspan=7><strong>Safety</strong></td>
|
505 |
+
</tr>
|
506 |
+
<tr>
|
507 |
+
<td align="center">AIR-Bench</td>
|
508 |
+
<td align="center">-</td>
|
509 |
+
<td align="center">-</td>
|
510 |
+
<td align="center">-</td>
|
511 |
+
<td align="center">-</td>
|
512 |
+
<td align="center">-</td>
|
513 |
+
<td align="center">75.6</td>
|
514 |
+
</tr>
|
515 |
+
</tbody>
|
516 |
+
</table>
|
517 |
+
</div>
|
518 |
+
|
519 |
+
<sup>
|
520 |
+
- <b>Bold</b> denotes open-source SOTA. <ins>Underlined</ins> indicates the second place in the open-source model.
|
521 |
+
</sup><br/><sup>
|
522 |
+
- "*" indicates that the results in this column are presented in the format of "reproduced_results (reported_results_if_any)". Some results have been omitted due to the failure of the evaluation run.
|
523 |
+
</sup><br/><sup>
|
524 |
+
- The results of Gemma3-27B are sourced directly from its technical report.
|
525 |
+
</sup><br/><sup>
|
526 |
+
- Generation configs for Seed-OSS-36B-Instruct: temperature=1.1, top_p=0.95. Specifically, for Taubench, temperature=1, top_p=0.7.
|
527 |
+
</sup><br/><sup>
|
528 |
+
</sup>
|
529 |
+
|
530 |
+
> [!NOTE]
|
531 |
+
> We recommend sampling with `temperature=1.1` and `top_p=0.95`.
|
532 |
+
|
533 |
+
### Thinking Budget
|
534 |
+
|
535 |
+
Users can flexibly specify the model's thinking budget. The figure below shows the performance curves across different tasks as the thinking budget varies. For simpler tasks (such as IFEval), the model's chain of thought (CoT) is shorter, and the score exhibits fluctuations as the thinking budget increases. For more challenging tasks (such as AIME and LiveCodeBench), the model's CoT is longer, and the score improves with an increase in the thinking budget.
|
536 |
+
|
537 |
+

|
538 |
+
|
539 |
+
Here is an example with a thinking budget set to 512: during the reasoning process, the model periodically triggers self-reflection to estimate the consumed and remaining budget, and delivers the final response once the budget is exhausted or the reasoning concludes.
|
540 |
+
```
|
541 |
+
<seed:think>
|
542 |
+
Got it, let's try to solve this problem step by step. The problem says ... ...
|
543 |
+
<seed:cot_budget_reflect>I have used 129 tokens, and there are 383 tokens remaining for use.</seed:cot_budget_reflect>
|
544 |
+
Using the power rule, ... ...
|
545 |
+
<seed:cot_budget_reflect>I have used 258 tokens, and there are 254 tokens remaining for use.</seed:cot_budget_reflect>
|
546 |
+
Alternatively, remember that ... ...
|
547 |
+
<seed:cot_budget_reflect>I have used 393 tokens, and there are 119 tokens remaining for use.</seed:cot_budget_reflect>
|
548 |
+
Because if ... ...
|
549 |
+
<seed:cot_budget_reflect>I have exhausted my token budget, and now I will start answering the question.</seed:cot_budget_reflect>
|
550 |
+
</seed:think>
|
551 |
+
To solve the problem, we start by using the properties of logarithms to simplify the given equations: (full answer omitted).
|
552 |
+
```
|
553 |
+
|
554 |
+
If no thinking budget is set (default mode), Seed-OSS will initiate thinking with unlimited length. If a thinking budget is specified, users are advised to prioritize values that are integer multiples of 512 (e.g., 512, 1K, 2K, 4K, 8K, or 16K), as the model has been extensively trained on these intervals. Models are instructed to output a direct response when the thinking budget is 0, and we recommend setting any budget below 512 to this value.
|
555 |
+
|
556 |
+
## Quick Start
|
557 |
+
```shell
|
558 |
+
pip3 install -r requirements.txt
|
559 |
+
pip install git+ssh://[email protected]/Fazziekey/transformers.git@seed-oss
|
560 |
+
```
|
561 |
+
|
562 |
+
```python
|
563 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
564 |
+
import os
|
565 |
+
import re
|
566 |
+
|
567 |
+
model_name_or_path = "ByteDance-Seed/Seed-OSS-36B-Instruct"
|
568 |
+
|
569 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
|
570 |
+
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto") # You may want to use bfloat16 and/or move to GPU here
|
571 |
+
messages = [
|
572 |
+
{"role": "user", "content": "How to make pasta?"},
|
573 |
+
]
|
574 |
+
tokenized_chat = tokenizer.apply_chat_template(
|
575 |
+
messages,
|
576 |
+
tokenize=True,
|
577 |
+
add_generation_prompt=True,
|
578 |
+
return_tensors="pt",
|
579 |
+
thinking_budget=512 # control the thinking budget
|
580 |
+
)
|
581 |
+
|
582 |
+
outputs = model.generate(tokenized_chat.to(model.device), max_new_tokens=2048)
|
583 |
+
|
584 |
+
output_text = tokenizer.decode(outputs[0])
|
585 |
+
```
|
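Continuing the snippet above, the reasoning in `output_text` is wrapped in `<seed:think> ... </seed:think>` (as in the thinking-budget example earlier), so one minimal way to separate the chain of thought from the final answer is to split on the closing tag. This is an illustrative sketch, not an official post-processing utility.

```python
# Split the decoded text into reasoning and final answer.
# Assumes the <seed:think>...</seed:think> tags shown in the thinking-budget example.
think_end = "</seed:think>"
if think_end in output_text:
    before, answer = output_text.split(think_end, 1)
    reasoning = before.split("<seed:think>", 1)[-1]
else:
    reasoning, answer = "", output_text

print(answer.strip())
```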
586 |
+
|
587 |
+
## Inference
|
588 |
+
|
589 |
+
### Download Model
|
590 |
+
|
591 |
+
Download Seed-OSS checkpoint to `./Seed-OSS-36B-Instruct`
|
592 |
+
|
593 |
+
### Transformers
|
594 |
+
The `generate.py` script provides a simple interface for model inference with configurable options.
|
595 |
+
|
596 |
+
#### Basic Usage
|
597 |
+
```shell
|
598 |
+
cd inference
|
599 |
+
python3 generate.py --model_path /path/to/model
|
600 |
+
```
|
601 |
+
|
602 |
+
#### Key Parameters
|
603 |
+
| Parameter | Description |
|
604 |
+
|-----------|-------------|
|
605 |
+
| `--model_path` | Path to the pretrained model directory (required) |
|
606 |
+
| `--prompts` | Input prompts (default: sample cooking/code questions) |
|
607 |
+
| `--max_new_tokens` | Maximum tokens to generate (default: 4096) |
|
608 |
+
| `--attn_implementation` | Attention mechanism: `flash_attention_2` (default) or `eager` |
|
609 |
+
| `--load_in_4bit/8bit` | Enable 4-bit/8-bit quantization (reduces memory usage) |
|
610 |
+
| `--thinking_budget` | Thinking budget in tokens (default: -1 for unlimited budget) |
|
611 |
+
|
612 |
+
#### Quantization Examples
|
613 |
+
```shell
|
614 |
+
# 8-bit quantization
|
615 |
+
python3 generate.py --model_path /path/to/model --load_in_8bit True
|
616 |
+
|
617 |
+
# 4-bit quantization
|
618 |
+
python3 generate.py --model_path /path/to/model --load_in_4bit True
|
619 |
+
```
|
620 |
+
|
621 |
+
#### Custom Prompts
|
622 |
+
```shell
|
623 |
+
python3 generate.py --model_path /path/to/model --prompts "['What is machine learning?', 'Explain quantum computing']"
|
624 |
+
```
|
625 |
+
|
626 |
+
### vLLM
|
627 |
+
Use vLLM 0.10.0 or higher for inference.
|
628 |
+
|
629 |
+
- First, install the vLLM version with Seed-OSS support:
|
630 |
+
```shell
|
631 |
+
VLLM_USE_PRECOMPILED=1 VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL=1 pip install git+ssh://[email protected]/FoolPlayer/vllm.git@seed-oss
|
632 |
+
```
|
633 |
+
|
634 |
+
- Start vLLM API server:
|
635 |
+
```shell
|
636 |
+
python3 -m vllm.entrypoints.openai.api_server \
|
637 |
+
--host localhost \
|
638 |
+
--port 4321 \
|
639 |
+
--enable-auto-tool-choice \
|
640 |
+
--tool-call-parser seed_oss \
|
641 |
+
--trust-remote-code \
|
642 |
+
--model ./Seed-OSS-36B-Instruct \
|
643 |
+
--chat-template ./Seed-OSS-36B-Instruct/chat_template.jinja \
|
644 |
+
--tensor-parallel-size 8 \
|
645 |
+
--dtype bfloat16 \
|
646 |
+
--served-model-name seed_oss
|
647 |
+
```
|
648 |
+
|
649 |
+
- Test with OpenAI client:
|
650 |
+
|
651 |
+
Chat
|
652 |
+
|
653 |
+
```shell
|
654 |
+
python3 inference/vllm_chat.py
|
655 |
+
```
|
656 |
+
|
657 |
+
Tool Call
|
658 |
+
```shell
|
659 |
+
python3 inference/vllm_tool_call.py
|
660 |
+
```
|
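For reference, a minimal chat request against the server started above might look like the sketch below. This is an illustrative example rather than the repository's official client: it assumes the `openai` Python package and reuses the host, port, and served model name from the launch command, while `inference/vllm_chat.py` and `inference/vllm_tool_call.py` remain the canonical scripts.

```python
from openai import OpenAI

# The vLLM OpenAI-compatible server does not require a real API key by default.
client = OpenAI(base_url="http://localhost:4321/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="seed_oss",
    messages=[{"role": "user", "content": "How to make pasta?"}],
    temperature=1.1,
    top_p=0.95,
)
print(response.choices[0].message.content)
```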
661 |
+
|
662 |
+
|
663 |
+
## Model Card
|
664 |
+
See [MODEL_CARD](./MODEL_CARD.md).
|
665 |
+
|
666 |
+
## License
|
667 |
+
This project is licensed under Apache-2.0. See the [LICENSE](./LICENSE) file for details.
|
668 |
+
|
669 |
+
## Citation
|
670 |
+
|
671 |
+
```bibtex
|
672 |
+
@misc{seed2025seed-oss,
|
673 |
+
author={ByteDance Seed Team},
|
674 |
+
title={Seed-OSS Open-Source Models},
|
675 |
+
year={2025},
|
676 |
+
howpublished={\url{https://github.com/ByteDance-Seed/seed-oss}}
|
677 |
+
}
|
678 |
+
```
|
679 |
+
|
680 |
+
## About [ByteDance Seed Team](https://seed.bytedance.com/)
|
681 |
+
|
682 |
+
Founded in 2023, ByteDance Seed Team is dedicated to crafting the industry's most advanced AI foundation models. The team aspires to become a world-class research team and make significant contributions to the advancement of science and society.
|
chat_template.jinja
ADDED
@@ -0,0 +1,171 @@
1 |
+
{# ---------- special token variables ---------- #}
|
2 |
+
{%- set bos_token = '<seed:bos>' -%}
|
3 |
+
{%- set eos_token = '<seed:eos>' -%}
|
4 |
+
{%- set pad_token = '<seed:pad>' -%}
|
5 |
+
{%- set toolcall_begin_token = '<seed:tool_call>' -%}
|
6 |
+
{%- set toolcall_end_token = '</seed:tool_call>' -%}
|
7 |
+
{%- set think_begin_token = '<seed:think>' -%}
|
8 |
+
{%- set think_end_token = '</seed:think>' -%}
|
9 |
+
{%- set budget_begin_token = '<seed:cot_budget_reflect>'-%}
|
10 |
+
{%- set budget_end_token = '</seed:cot_budget_reflect>'-%}
|
11 |
+
{# -------------- reflection-interval lookup -------------- #}
|
12 |
+
{%- if not thinking_budget is defined %}
|
13 |
+
{%- set thinking_budget = -1 -%}
|
14 |
+
{%- endif -%}
|
15 |
+
{%- set budget_reflections_v05 = {
|
16 |
+
0: 0,
|
17 |
+
512: 128,
|
18 |
+
1024: 256,
|
19 |
+
2048: 512,
|
20 |
+
4096: 512,
|
21 |
+
8192: 1024,
|
22 |
+
16384: 1024
|
23 |
+
} -%}
|
24 |
+
{# Find the first tier that is greater than or equal to thinking_budget #}
|
25 |
+
{%- set ns = namespace(interval = None) -%}
|
26 |
+
{%- for k, v in budget_reflections_v05 | dictsort -%}
|
27 |
+
{%- if ns.interval is none and thinking_budget <= k -%}
|
28 |
+
{%- set ns.interval = v -%}
|
29 |
+
{%- endif -%}
|
30 |
+
{%- endfor -%}
|
31 |
+
{# If it exceeds the largest tier, use the value of the last tier #}
|
32 |
+
{%- if ns.interval is none -%}
|
33 |
+
{%- set ns.interval = budget_reflections_v05[16384] -%}
|
34 |
+
{%- endif -%}
|
35 |
+
{# ---------- Preprocess the system message ---------- #}
|
36 |
+
{%- if messages[0]["role"] == "system" %}
|
37 |
+
{%- set system_message = messages[0]["content"] %}
|
38 |
+
{%- set loop_messages = messages[1:] %}
|
39 |
+
{%- else %}
|
40 |
+
{%- set loop_messages = messages %}
|
41 |
+
{%- endif %}
|
42 |
+
{# ---------- Ensure tools is defined ---------- #}
|
43 |
+
{%- if not tools is defined or tools is none %}
|
44 |
+
{%- set tools = [] %}
|
45 |
+
{%- endif %}
|
46 |
+
{# tools2doc.jinja #}
|
47 |
+
{%- macro py_type(t) -%}
|
48 |
+
{%- if t == "string" -%}str
|
49 |
+
{%- elif t in ("number", "integer") -%}int
|
50 |
+
{%- elif t == "boolean" -%}bool
|
51 |
+
{%- elif t == "array" -%}list
|
52 |
+
{%- else -%}Any{%- endif -%}
|
53 |
+
{%- endmacro -%}
|
54 |
+
{# ---------- Emit the system block ---------- #}
|
55 |
+
{%- if system_message is defined %}
|
56 |
+
{{ bos_token + "system\n" + system_message }}
|
57 |
+
{%- else %}
|
58 |
+
{%- if tools is iterable and tools | length > 0 %}
|
59 |
+
{{ bos_token + "system\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query." }}
|
60 |
+
{%- endif %}
|
61 |
+
{%- endif %}
|
62 |
+
{%- if use_json_tooldef is defined and use_json_tooldef %}
|
63 |
+
|
64 |
+
{{"Tool List:\nYou are authorized to use the following tools (described in JSON Schema format). Before performing any task, you must decide how to call them based on the descriptions and parameters of these tools."}}
|
65 |
+
{{ tools | tojson(ensure_ascii=False) }}
|
66 |
+
{%- else %}
|
67 |
+
{%- for item in tools if item.type == "function" %}
|
68 |
+
|
69 |
+
|
70 |
+
Function:
|
71 |
+
def {{ item.function.name }}(
|
72 |
+
{%- for name, spec in item.function.parameters.properties.items() %}
|
73 |
+
{{- name }}: {{ py_type(spec.type) }}{% if not loop.last %},{% endif %}
|
74 |
+
{%- endfor %}):
|
75 |
+
"""
|
76 |
+
{{ item.function.description | trim }}
|
77 |
+
|
78 |
+
{# ---------- Args ---------- #}
|
79 |
+
{%- if item.function.parameters.properties %}
|
80 |
+
Args:
|
81 |
+
{%- for name, spec in item.function.parameters.properties.items() %}
|
82 |
+
|
83 |
+
- {{ name }} ({{ py_type(spec.type) }})
|
84 |
+
{%- if name in item.function.parameters.required %} [必填]{% else %} [选填]{% endif %}:
|
85 |
+
{{- " " ~ (spec.description or "") }}
|
86 |
+
{%- endfor %}
|
87 |
+
{%- endif %}
|
88 |
+
|
89 |
+
{# ---------- Returns ---------- #}
|
90 |
+
{%- if item.function.returns is defined
|
91 |
+
and item.function.returns.properties is defined
|
92 |
+
and item.function.returns.properties %}
|
93 |
+
Returns:
|
94 |
+
{%- for name, spec in item.function.returns.properties.items() %}
|
95 |
+
|
96 |
+
- {{ name }} ({{ py_type(spec.type) }}):
|
97 |
+
{{- " " ~ (spec.description or "") }}
|
98 |
+
{%- endfor %}
|
99 |
+
{%- endif %}
|
100 |
+
|
101 |
+
"""
|
102 |
+
{%- endfor %}
|
103 |
+
{%- endif %}
|
104 |
+
{%- if tools is iterable and tools | length > 0 %}
|
105 |
+
|
106 |
+
{{"工具调用请遵循如下格式:\n<seed:tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple lines</parameter>\n</function>\n</seed:tool_call>\n"}}
|
107 |
+
{%- endif %}
|
108 |
+
{# Close the system block #}
|
109 |
+
{%- if system_message is defined or tools is iterable and tools | length > 0 %}
|
110 |
+
{{ eos_token }}
|
111 |
+
{%- endif %}
|
112 |
+
{# ---------- Thinking Budget ---------- #}
|
113 |
+
{%- if thinking_budget is defined %}
|
114 |
+
{%- if thinking_budget == 0 %}
|
115 |
+
{{ bos_token+"system" }}
|
116 |
+
{{ "You are an intelligent assistant that can answer questions in one step without the need for reasoning and thinking, that is, your thinking budget is 0. Next, please skip the thinking process and directly start answering the user's questions." }}
|
117 |
+
{{ eos_token }}
|
118 |
+
{%- elif not thinking_budget == -1 %}
|
119 |
+
{{ bos_token+"system" }}
|
120 |
+
{{ "You are an intelligent assistant with reflective ability. In the process of thinking and reasoning, you need to strictly follow the thinking budget, which is "}}{{thinking_budget}}{{". That is, you need to complete your thinking within "}}{{thinking_budget}}{{" tokens and start answering the user's questions. You will reflect on your thinking process every "}}{{ns.interval}}{{" tokens, stating how many tokens have been used and how many are left."}}
|
121 |
+
{{ eos_token }}
|
122 |
+
{%- endif %}
|
123 |
+
{%- endif %}
|
124 |
+
{# ---------- Write out the history messages one by one ---------- #}
|
125 |
+
{%- for message in loop_messages %}
|
126 |
+
{%- if message.role == "assistant"
|
127 |
+
and message.tool_calls is defined
|
128 |
+
and message.tool_calls is iterable
|
129 |
+
and message.tool_calls | length > 0 %}
|
130 |
+
{{ bos_token + message.role }}
|
131 |
+
{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}
|
132 |
+
{{ "\n" + think_begin_token + message.reasoning_content | trim + think_end_token }}
|
133 |
+
{%- endif %}
|
134 |
+
{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
|
135 |
+
{{ "\n" + message.content | trim + "\n" }}
|
136 |
+
{%- endif %}
|
137 |
+
{%- for tool_call in message.tool_calls %}
|
138 |
+
{%- if tool_call.function is defined %}{% set tool_call = tool_call.function %}{% endif %}
|
139 |
+
{{ "\n" + toolcall_begin_token + "\n<function=" + tool_call.name + ">\n" }}
|
140 |
+
{%- if tool_call.arguments is defined %}
|
141 |
+
{%- for arg_name, arg_value in tool_call.arguments | items %}
|
142 |
+
{{ "<parameter=" + arg_name + ">" }}
|
143 |
+
{%- set arg_value = arg_value if arg_value is string else arg_value | string %}
|
144 |
+
{{ arg_value+"</parameter>\n" }}
|
145 |
+
{%- endfor %}
|
146 |
+
{%- endif %}
|
147 |
+
{{ "</function>\n" + toolcall_end_token }}
|
148 |
+
{%- endfor %}
|
149 |
+
{{ eos_token }}
|
150 |
+
{%- elif message.role in ["user", "system"] %}
|
151 |
+
{{ bos_token + message.role + "\n" + message.content + eos_token }}
|
152 |
+
{%- elif message.role == "assistant" %}
|
153 |
+
{{ bos_token + message.role }}
|
154 |
+
{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}
|
155 |
+
{{ "\n" + think_begin_token + message.reasoning_content | trim + think_end_token }}
|
156 |
+
{%- endif %}
|
157 |
+
{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
|
158 |
+
{{ "\n" + message.content | trim + eos_token }}
|
159 |
+
{%- endif %}
|
160 |
+
{# The tool role is also handled by this branch #}
|
161 |
+
{%- else %}
|
162 |
+
{{ bos_token + message.role + "\n" + message.content + eos_token }}
|
163 |
+
{%- endif %}
|
164 |
+
{%- endfor %}
|
165 |
+
{# ---------- Prompt the model to start generating ---------- #}
|
166 |
+
{%- if add_generation_prompt %}
|
167 |
+
{{ bos_token+"assistant\n" }}
|
168 |
+
{%- if thinking_budget == 0 %}
|
169 |
+
{{ think_begin_token+budget_begin_token }}
|
170 |
+
{%- endif %}
|
171 |
+
{%- endif %}
|
config.json
ADDED
@@ -0,0 +1,41 @@
1 |
+
{
|
2 |
+
"name_or_path": "tclf90/Seed-OSS-36B-Instruct-AWQ",
|
3 |
+
"architectures": [
|
4 |
+
"SeedOssForCausalLM"
|
5 |
+
],
|
6 |
+
"attention_bias": true,
|
7 |
+
"attention_dropout": 0.1,
|
8 |
+
"attention_out_bias": false,
|
9 |
+
"bos_token_id": 0,
|
10 |
+
"pad_token_id": 1,
|
11 |
+
"eos_token_id": 2,
|
12 |
+
"head_dim": 128,
|
13 |
+
"hidden_act": "silu",
|
14 |
+
"hidden_size": 5120,
|
15 |
+
"initializer_range": 0.02,
|
16 |
+
"intermediate_size": 27648,
|
17 |
+
"max_position_embeddings": 524288,
|
18 |
+
"mlp_bias": false,
|
19 |
+
"model_type": "seed_oss",
|
20 |
+
"num_attention_heads": 80,
|
21 |
+
"num_hidden_layers": 64,
|
22 |
+
"num_key_value_heads": 8,
|
23 |
+
"residual_dropout": 0.1,
|
24 |
+
"rms_norm_eps": 1e-06,
|
25 |
+
"rope_scaling": {
|
26 |
+
"rope_type": "default"
|
27 |
+
},
|
28 |
+
"rope_theta": 10000000.0,
|
29 |
+
"tie_word_embeddings": false,
|
30 |
+
"torch_dtype": "bfloat16",
|
31 |
+
"transformers_version": "4.55.0",
|
32 |
+
"use_cache": true,
|
33 |
+
"vocab_size": 155136,
|
34 |
+
"quantization_config": {
|
35 |
+
"quant_method": "awq",
|
36 |
+
"bits": 4,
|
37 |
+
"group_size": 128,
|
38 |
+
"version": "gemm",
|
39 |
+
"zero_point": true
|
40 |
+
}
|
41 |
+
}
|
configuration.json
ADDED
@@ -0,0 +1 @@
1 |
+
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
|
generation_config.json
ADDED
@@ -0,0 +1,10 @@
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 0,
|
4 |
+
"pad_token_id": 1,
|
5 |
+
"eos_token_id": 2,
|
6 |
+
"transformers_version": "4.55.0",
|
7 |
+
"temperature": 1.1,
|
8 |
+
"top_p": 0.95
|
9 |
+
}
|
10 |
+
|
model-00001-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fcb92887d96a6e292db5409424b08df40a7f7eee1900f22e4446adcd2667f236
|
3 |
+
size 2991620632
|
model-00002-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d78765a1dec0ec6840e831228d540d26cc1803e9b5d5647d505cc94dda65834b
|
3 |
+
size 2953123296
|
model-00003-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f33cd2486137af21d0a3e639a84243047ab39ca20ad0076e403b18b7c2d43a2d
|
3 |
+
size 2939550880
|
model-00004-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ecd6b0af244c858d1b6825b9735ac932697a8e63bb8be44c7fa4477e8a6b9f1f
|
3 |
+
size 2953123432
|
model-00005-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0b328861cc4469b3da17aa9922e904a34638f61b2ebbd8ea63634539bfd80d40
|
3 |
+
size 2939550880
|
model-00006-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a176b18a344a12ad7a5aa2ee778ffa8342527a81270d92ad34ba1e4548d500af
|
3 |
+
size 2939530184
|
model-00007-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9e934df19e012819fccaec87b1af31d18fbc653638ef64bae94d5a9d1f3799f0
|
3 |
+
size 2931659624
|
model-00008-of-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4e97955ec7fdf519bd1e8819933b92f965765a25f5f8aa811b66765d26484bdd
|
3 |
+
size 487662984
|
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
1 |
+
{
|
2 |
+
"bos_token": {
|
3 |
+
"content": "<seed:bos>",
|
4 |
+
"lstrip": false,
|
5 |
+
"normalized": false,
|
6 |
+
"rstrip": false,
|
7 |
+
"single_word": false
|
8 |
+
},
|
9 |
+
"eos_token": {
|
10 |
+
"content": "<seed:eos>",
|
11 |
+
"lstrip": false,
|
12 |
+
"normalized": false,
|
13 |
+
"rstrip": false,
|
14 |
+
"single_word": false
|
15 |
+
},
|
16 |
+
"pad_token": {
|
17 |
+
"content": "<seed:pad>",
|
18 |
+
"lstrip": false,
|
19 |
+
"normalized": false,
|
20 |
+
"rstrip": false,
|
21 |
+
"single_word": false
|
22 |
+
}
|
23 |
+
}
|
thinking_budget.png
ADDED
Git LFS Details
|
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f6bd848f52451824a3033a9f1e67eea5b399a13c90f845a332d3a29537e05827
|
3 |
+
size 11883696
|
tokenizer_config.json
ADDED
@@ -0,0 +1,1035 @@
1 |
+
{
|
2 |
+
"added_tokens_decoder": {
|
3 |
+
"0": {
|
4 |
+
"content": "<seed:bos>",
|
5 |
+
"lstrip": false,
|
6 |
+
"normalized": false,
|
7 |
+
"rstrip": false,
|
8 |
+
"single_word": false,
|
9 |
+
"special": true
|
10 |
+
},
|
11 |
+
"1": {
|
12 |
+
"content": "<seed:pad>",
|
13 |
+
"lstrip": false,
|
14 |
+
"normalized": false,
|
15 |
+
"rstrip": false,
|
16 |
+
"single_word": false,
|
17 |
+
"special": true
|
18 |
+
},
|
19 |
+
"2": {
|
20 |
+
"content": "<seed:eos>",
|
21 |
+
"lstrip": false,
|
22 |
+
"normalized": false,
|
23 |
+
"rstrip": false,
|
24 |
+
"single_word": false,
|
25 |
+
"special": true
|
26 |
+
},
|
27 |
+
"3": {
|
28 |
+
"content": "<seed:think>",
|
29 |
+
"lstrip": false,
|
30 |
+
"normalized": false,
|
31 |
+
"rstrip": false,
|
32 |
+
"single_word": false,
|
33 |
+
"special": false
|
34 |
+
},
|
35 |
+
"4": {
|
36 |
+
"content": "</seed:think>",
|
37 |
+
"lstrip": false,
|
38 |
+
"normalized": false,
|
39 |
+
"rstrip": false,
|
40 |
+
"single_word": false,
|
41 |
+
"special": false
|
42 |
+
},
|
43 |
+
"5": {
|
44 |
+
"content": "<seed:cot_budget_reflect>",
|
45 |
+
"lstrip": false,
|
46 |
+
"normalized": false,
|
47 |
+
"rstrip": false,
|
48 |
+
"single_word": false,
|
49 |
+
"special": false
|
50 |
+
},
|
51 |
+
"6": {
|
52 |
+
"content": "</seed:cot_budget_reflect>",
|
53 |
+
"lstrip": false,
|
54 |
+
"normalized": false,
|
55 |
+
"rstrip": false,
|
56 |
+
"single_word": false,
|
57 |
+
"special": false
|
58 |
+
},
|
59 |
+
"7": {
|
60 |
+
"content": "<seed:tool_call>",
|
61 |
+
"lstrip": false,
|
62 |
+
"normalized": false,
|
63 |
+
"rstrip": false,
|
64 |
+
"single_word": false,
|
65 |
+
"special": false
|
66 |
+
},
|
67 |
+
"8": {
|
68 |
+
"content": "</seed:tool_call>",
|
69 |
+
"lstrip": false,
|
70 |
+
"normalized": false,
|
71 |
+
"rstrip": false,
|
72 |
+
"single_word": false,
|
73 |
+
"special": false
|
74 |
+
},
|
75 |
+
"9": {
|
76 |
+
"content": "<[PLHD9_never_used]>",
|
77 |
+
"lstrip": false,
|
78 |
+
"normalized": false,
|
79 |
+
"rstrip": false,
|
80 |
+
"single_word": false,
|
81 |
+
"special": true
|
82 |
+
},
|
83 |
+
"10": {
|
84 |
+
"content": "<[PLHD10_never_used]>",
|
85 |
+
"lstrip": false,
|
86 |
+
"normalized": false,
|
87 |
+
"rstrip": false,
|
88 |
+
"single_word": false,
|
89 |
+
"special": true
|
90 |
+
},
|
91 |
+
"11": {
|
92 |
+
"content": "<[PLHD11_never_used]>",
|
93 |
+
"lstrip": false,
|
94 |
+
"normalized": false,
|
95 |
+
"rstrip": false,
|
96 |
+
"single_word": false,
|
97 |
+
"special": true
|
98 |
+
},
|
99 |
+
"12": {
|
100 |
+
"content": "<[PLHD12_never_used]>",
|
101 |
+
"lstrip": false,
|
102 |
+
"normalized": false,
|
103 |
+
"rstrip": false,
|
104 |
+
"single_word": false,
|
105 |
+
"special": true
|
106 |
+
},
|
107 |
+
"13": {
|
108 |
+
"content": "<[PLHD13_never_used]>",
|
109 |
+
"lstrip": false,
|
110 |
+
"normalized": false,
|
111 |
+
"rstrip": false,
|
112 |
+
"single_word": false,
|
113 |
+
"special": true
|
114 |
+
},
|
115 |
+
"14": {
|
116 |
+
"content": "<[PLHD14_never_used]>",
|
117 |
+
"lstrip": false,
|
118 |
+
"normalized": false,
|
119 |
+
"rstrip": false,
|
120 |
+
"single_word": false,
|
121 |
+
"special": true
|
122 |
+
},
|
123 |
+
"15": {
|
124 |
+
"content": "<[PLHD15_never_used]>",
|
125 |
+
"lstrip": false,
|
126 |
+
"normalized": false,
|
127 |
+
"rstrip": false,
|
128 |
+
"single_word": false,
|
129 |
+
"special": true
|
130 |
+
},
|
131 |
+
"16": {
|
132 |
+
"content": "<[PLHD16_never_used]>",
|
133 |
+
"lstrip": false,
|
134 |
+
"normalized": false,
|
135 |
+
"rstrip": false,
|
136 |
+
"single_word": false,
|
137 |
+
"special": true
|
138 |
+
},
|
139 |
+
"17": {
|
140 |
+
"content": "<[PLHD17_never_used]>",
|
141 |
+
"lstrip": false,
|
142 |
+
"normalized": false,
|
143 |
+
"rstrip": false,
|
144 |
+
"single_word": false,
|
145 |
+
"special": true
|
146 |
+
},
|
147 |
+
"18": {
|
148 |
+
"content": "<[PLHD18_never_used]>",
|
149 |
+
"lstrip": false,
|
150 |
+
"normalized": false,
|
151 |
+
"rstrip": false,
|
152 |
+
"single_word": false,
|
153 |
+
"special": true
|
154 |
+
},
|
155 |
+
"19": {
|
156 |
+
"content": "<[PLHD19_never_used]>",
|
157 |
+
"lstrip": false,
|
158 |
+
"normalized": false,
|
159 |
+
"rstrip": false,
|
160 |
+
"single_word": false,
|
161 |
+
"special": true
|
162 |
+
},
|
163 |
+
"20": {
|
164 |
+
"content": "<[PLHD20_never_used]>",
|
165 |
+
"lstrip": false,
|
166 |
+
"normalized": false,
|
167 |
+
"rstrip": false,
|
168 |
+
"single_word": false,
|
169 |
+
"special": true
|
170 |
+
},
|
171 |
+
"21": {
|
172 |
+
"content": "<[PLHD21_never_used]>",
|
173 |
+
"lstrip": false,
|
174 |
+
"normalized": false,
|
175 |
+
"rstrip": false,
|
176 |
+
"single_word": false,
|
177 |
+
"special": true
|
178 |
+
},
|
179 |
+
"22": {
|
180 |
+
"content": "<[PLHD22_never_used]>",
|
181 |
+
"lstrip": false,
|
182 |
+
"normalized": false,
|
183 |
+
"rstrip": false,
|
184 |
+
"single_word": false,
|
185 |
+
"special": true
|
186 |
+
},
|
187 |
+
"23": {
|
188 |
+
"content": "<[PLHD23_never_used]>",
|
189 |
+
"lstrip": false,
|
190 |
+
"normalized": false,
|
191 |
+
"rstrip": false,
|
192 |
+
"single_word": false,
|
193 |
+
"special": true
|
194 |
+
},
|
195 |
+
"24": {
|
196 |
+
"content": "<[PLHD24_never_used]>",
|
197 |
+
"lstrip": false,
|
198 |
+
"normalized": false,
|
199 |
+
"rstrip": false,
|
200 |
+
"single_word": false,
|
201 |
+
"special": true
|
202 |
+
},
|
203 |
+
"25": {
|
204 |
+
"content": "<[PLHD25_never_used]>",
|
205 |
+
"lstrip": false,
|
206 |
+
"normalized": false,
|
207 |
+
"rstrip": false,
|
208 |
+
"single_word": false,
|
209 |
+
"special": true
|
210 |
+
},
|
211 |
+
"26": {
|
212 |
+
"content": "<[PLHD26_never_used]>",
|
213 |
+
"lstrip": false,
|
214 |
+
"normalized": false,
|
215 |
+
"rstrip": false,
|
216 |
+
"single_word": false,
|
217 |
+
"special": true
|
218 |
+
},
|
219 |
+
"27": {
|
220 |
+
"content": "<[PLHD27_never_used]>",
|
221 |
+
"lstrip": false,
|
222 |
+
"normalized": false,
|
223 |
+
"rstrip": false,
|
224 |
+
"single_word": false,
|
225 |
+
"special": true
|
226 |
+
},
|
227 |
+
"28": {
|
228 |
+
"content": "<[PLHD28_never_used]>",
|
229 |
+
"lstrip": false,
|
230 |
+
"normalized": false,
|
231 |
+
"rstrip": false,
|
232 |
+
"single_word": false,
|
233 |
+
"special": true
|
234 |
+
},
|
235 |
+
"29": {
|
236 |
+
"content": "<[PLHD29_never_used]>",
|
237 |
+
"lstrip": false,
|
238 |
+
"normalized": false,
|
239 |
+
"rstrip": false,
|
240 |
+
"single_word": false,
|
241 |
+
"special": true
|
242 |
+
},
|
243 |
+
"30": {
|
244 |
+
"content": "<[PLHD30_never_used]>",
|
245 |
+
"lstrip": false,
|
246 |
+
"normalized": false,
|
247 |
+
"rstrip": false,
|
248 |
+
"single_word": false,
|
249 |
+
"special": true
|
250 |
+
},
|
251 |
+
"31": {
|
252 |
+
"content": "<[PLHD31_never_used]>",
|
253 |
+
"lstrip": false,
|
254 |
+
"normalized": false,
|
255 |
+
"rstrip": false,
|
256 |
+
"single_word": false,
|
257 |
+
"special": true
|
258 |
+
},
|
259 |
+
"32": {
|
260 |
+
"content": "<[PLHD32_never_used]>",
|
261 |
+
"lstrip": false,
|
262 |
+
"normalized": false,
|
263 |
+
"rstrip": false,
|
264 |
+
"single_word": false,
|
265 |
+
"special": true
|
266 |
+
},
|
267 |
+
"33": {
|
268 |
+
"content": "<[PLHD33_never_used]>",
|
269 |
+
"lstrip": false,
|
270 |
+
"normalized": false,
|
271 |
+
"rstrip": false,
|
272 |
+
"single_word": false,
|
273 |
+
"special": true
|
274 |
+
},
|
275 |
+
"34": {
|
276 |
+
"content": "<[PLHD34_never_used]>",
|
277 |
+
"lstrip": false,
|
278 |
+
"normalized": false,
|
279 |
+
"rstrip": false,
|
280 |
+
"single_word": false,
|
281 |
+
"special": true
|
282 |
+
},
|
283 |
+
"35": {
|
284 |
+
"content": "<[PLHD35_never_used]>",
|
285 |
+
"lstrip": false,
|
286 |
+
"normalized": false,
|
287 |
+
"rstrip": false,
|
288 |
+
"single_word": false,
|
289 |
+
"special": true
|
290 |
+
},
|
291 |
+
"36": {
|
292 |
+
"content": "<[PLHD36_never_used]>",
|
293 |
+
"lstrip": false,
|
294 |
+
"normalized": false,
|
295 |
+
"rstrip": false,
|
296 |
+
"single_word": false,
|
297 |
+
"special": true
|
298 |
+
},
|
299 |
+
"37": {
|
300 |
+
"content": "<[PLHD37_never_used]>",
|
301 |
+
"lstrip": false,
|
302 |
+
"normalized": false,
|
303 |
+
"rstrip": false,
|
304 |
+
"single_word": false,
|
305 |
+
"special": true
|
306 |
+
},
|
307 |
+
"38": {
|
308 |
+
"content": "<[PLHD38_never_used]>",
|
309 |
+
"lstrip": false,
|
310 |
+
"normalized": false,
|
311 |
+
"rstrip": false,
|
312 |
+
"single_word": false,
|
313 |
+
"special": true
|
314 |
+
},
|
315 |
+
"39": {
|
316 |
+
"content": "<[PLHD39_never_used]>",
|
317 |
+
"lstrip": false,
|
318 |
+
"normalized": false,
|
319 |
+
"rstrip": false,
|
320 |
+
"single_word": false,
|
321 |
+
"special": true
|
322 |
+
},
|
323 |
+
"40": {
|
324 |
+
"content": "<[PLHD40_never_used]>",
|
325 |
+
"lstrip": false,
|
326 |
+
"normalized": false,
|
327 |
+
"rstrip": false,
|
328 |
+
"single_word": false,
|
329 |
+
"special": true
|
330 |
+
},
|
331 |
+
"41": {
|
332 |
+
"content": "<[PLHD41_never_used]>",
|
333 |
+
"lstrip": false,
|
334 |
+
"normalized": false,
|
335 |
+
"rstrip": false,
|
336 |
+
"single_word": false,
|
337 |
+
"special": true
|
338 |
+
},
|
339 |
+
"42": {
|
340 |
+
"content": "<[PLHD42_never_used]>",
|
341 |
+
"lstrip": false,
|
342 |
+
"normalized": false,
|
343 |
+
"rstrip": false,
|
344 |
+
"single_word": false,
|
345 |
+
"special": true
|
346 |
+
},
|
347 |
+
"43": {
|
348 |
+
"content": "<[PLHD43_never_used]>",
|
349 |
+
"lstrip": false,
|
350 |
+
"normalized": false,
|
351 |
+
"rstrip": false,
|
352 |
+
"single_word": false,
|
353 |
+
"special": true
|
354 |
+
},
|
355 |
+
"44": {
|
356 |
+
"content": "<[PLHD44_never_used]>",
|
357 |
+
"lstrip": false,
|
358 |
+
"normalized": false,
|
359 |
+
"rstrip": false,
|
360 |
+
"single_word": false,
|
361 |
+
"special": true
|
362 |
+
},
|
363 |
+
"45": {
|
364 |
+
"content": "<[PLHD45_never_used]>",
|
365 |
+
"lstrip": false,
|
366 |
+
"normalized": false,
|
367 |
+
"rstrip": false,
|
368 |
+
"single_word": false,
|
369 |
+
"special": true
|
370 |
+
},
|
371 |
+
"46": {
|
372 |
+
"content": "<[PLHD46_never_used]>",
|
373 |
+
"lstrip": false,
|
374 |
+
"normalized": false,
|
375 |
+
"rstrip": false,
|
376 |
+
"single_word": false,
|
377 |
+
"special": true
|
378 |
+
},
|
379 |
+
"47": {
|
380 |
+
"content": "<[PLHD47_never_used]>",
|
381 |
+
"lstrip": false,
|
382 |
+
"normalized": false,
|
383 |
+
"rstrip": false,
|
384 |
+
"single_word": false,
|
385 |
+
"special": true
|
386 |
+
},
|
387 |
+
"48": {
|
388 |
+
"content": "<[PLHD48_never_used]>",
|
389 |
+
"lstrip": false,
|
390 |
+
"normalized": false,
|
391 |
+
"rstrip": false,
|
392 |
+
"single_word": false,
|
393 |
+
"special": true
|
394 |
+
},
|
395 |
+
"49": {
|
396 |
+
"content": "<[PLHD49_never_used]>",
|
397 |
+
"lstrip": false,
|
398 |
+
"normalized": false,
|
399 |
+
"rstrip": false,
|
400 |
+
"single_word": false,
|
401 |
+
"special": true
|
402 |
+
},
|
403 |
+
"50": {
|
404 |
+
"content": "<[PLHD50_never_used]>",
|
405 |
+
"lstrip": false,
|
406 |
+
"normalized": false,
|
407 |
+
"rstrip": false,
|
408 |
+
"single_word": false,
|
409 |
+
"special": true
|
410 |
+
},
|
411 |
+
"51": {
|
412 |
+
"content": "<[PLHD51_never_used]>",
|
413 |
+
"lstrip": false,
|
414 |
+
"normalized": false,
|
415 |
+
"rstrip": false,
|
416 |
+
"single_word": false,
|
417 |
+
"special": true
|
418 |
+
},
|
419 |
+
"52": {
|
420 |
+
"content": "<[PLHD52_never_used]>",
|
421 |
+
"lstrip": false,
|
422 |
+
"normalized": false,
|
423 |
+
"rstrip": false,
|
424 |
+
"single_word": false,
|
425 |
+
"special": true
|
426 |
+
},
|
427 |
+
"53": {
|
428 |
+
"content": "<[PLHD53_never_used]>",
|
429 |
+
"lstrip": false,
|
430 |
+
"normalized": false,
|
431 |
+
"rstrip": false,
|
432 |
+
"single_word": false,
|
433 |
+
"special": true
|
434 |
+
},
|
435 |
+
"54": {
|
436 |
+
"content": "<[PLHD54_never_used]>",
|
437 |
+
"lstrip": false,
|
438 |
+
"normalized": false,
|
439 |
+
"rstrip": false,
|
440 |
+
"single_word": false,
|
441 |
+
"special": true
|
442 |
+
},
|
443 |
+
"55": {
|
444 |
+
"content": "<[PLHD55_never_used]>",
|
445 |
+
"lstrip": false,
|
446 |
+
"normalized": false,
|
447 |
+
"rstrip": false,
|
448 |
+
"single_word": false,
|
449 |
+
"special": true
|
450 |
+
},
|
451 |
+
"56": {
|
452 |
+
"content": "<[PLHD56_never_used]>",
|
453 |
+
"lstrip": false,
|
454 |
+
"normalized": false,
|
455 |
+
"rstrip": false,
|
456 |
+
"single_word": false,
|
457 |
+
"special": true
|
458 |
+
},
|
459 |
+
"57": {
|
460 |
+
"content": "<[PLHD57_never_used]>",
|
461 |
+
"lstrip": false,
|
462 |
+
"normalized": false,
|
463 |
+
"rstrip": false,
|
464 |
+
"single_word": false,
|
465 |
+
"special": true
|
466 |
+
},
|
467 |
+
"58": {
|
468 |
+
"content": "<[PLHD58_never_used]>",
|
469 |
+
"lstrip": false,
|
470 |
+
"normalized": false,
|
471 |
+
"rstrip": false,
|
472 |
+
"single_word": false,
|
473 |
+
"special": true
|
474 |
+
},
|
475 |
+
"59": {
|
476 |
+
"content": "<[PLHD59_never_used]>",
|
477 |
+
"lstrip": false,
|
478 |
+
"normalized": false,
|
479 |
+
"rstrip": false,
|
480 |
+
"single_word": false,
|
481 |
+
"special": true
|
482 |
+
},
|
483 |
+
"60": {
|
484 |
+
"content": "<[PLHD60_never_used]>",
|
485 |
+
"lstrip": false,
|
486 |
+
"normalized": false,
|
487 |
+
"rstrip": false,
|
488 |
+
"single_word": false,
|
489 |
+
"special": true
|
490 |
+
},
|
491 |
+
"61": {
|
492 |
+
"content": "<[PLHD61_never_used]>",
|
493 |
+
"lstrip": false,
|
494 |
+
"normalized": false,
|
495 |
+
"rstrip": false,
|
496 |
+
"single_word": false,
|
497 |
+
"special": true
|
498 |
+
},
|
499 |
+
"62": {
|
500 |
+
"content": "<[PLHD62_never_used]>",
|
501 |
+
"lstrip": false,
|
502 |
+
"normalized": false,
|
503 |
+
"rstrip": false,
|
504 |
+
"single_word": false,
|
505 |
+
"special": true
|
506 |
+
},
|
507 |
+
"63": {
|
508 |
+
"content": "<[PLHD63_never_used]>",
|
509 |
+
"lstrip": false,
|
510 |
+
"normalized": false,
|
511 |
+
"rstrip": false,
|
512 |
+
"single_word": false,
|
513 |
+
"special": true
|
514 |
+
},
|
515 |
+
"64": {
|
516 |
+
"content": "<[PLHD64_never_used]>",
|
517 |
+
"lstrip": false,
|
518 |
+
"normalized": false,
|
519 |
+
"rstrip": false,
|
520 |
+
"single_word": false,
|
521 |
+
"special": true
|
522 |
+
},
|
523 |
+
"65": {
|
524 |
+
"content": "<[PLHD65_never_used]>",
|
525 |
+
"lstrip": false,
|
526 |
+
"normalized": false,
|
527 |
+
"rstrip": false,
|
528 |
+
"single_word": false,
|
529 |
+
"special": true
|
530 |
+
},
|
531 |
+
"66": {
|
532 |
+
"content": "<[PLHD66_never_used]>",
|
533 |
+
"lstrip": false,
|
534 |
+
"normalized": false,
|
535 |
+
"rstrip": false,
|
536 |
+
"single_word": false,
|
537 |
+
"special": true
|
538 |
+
},
|
539 |
+
"67": {
|
540 |
+
"content": "<[PLHD67_never_used]>",
|
541 |
+
"lstrip": false,
|
542 |
+
"normalized": false,
|
543 |
+
"rstrip": false,
|
544 |
+
"single_word": false,
|
545 |
+
"special": true
|
546 |
+
},
|
547 |
+
"68": {
|
548 |
+
"content": "<[PLHD68_never_used]>",
|
549 |
+
"lstrip": false,
|
550 |
+
"normalized": false,
|
551 |
+
"rstrip": false,
|
552 |
+
"single_word": false,
|
553 |
+
"special": true
|
554 |
+
},
|
555 |
+
"69": {
|
556 |
+
"content": "<[PLHD69_never_used]>",
|
557 |
+
"lstrip": false,
|
558 |
+
"normalized": false,
|
559 |
+
"rstrip": false,
|
560 |
+
"single_word": false,
|
561 |
+
"special": true
|
562 |
+
},
|
563 |
+
"70": {
|
564 |
+
"content": "<[PLHD70_never_used]>",
|
565 |
+
"lstrip": false,
|
566 |
+
"normalized": false,
|
567 |
+
"rstrip": false,
|
568 |
+
"single_word": false,
|
569 |
+
"special": true
|
570 |
+
},
|
571 |
+
"71": {
|
572 |
+
"content": "<[PLHD71_never_used]>",
|
573 |
+
"lstrip": false,
|
574 |
+
"normalized": false,
|
575 |
+
"rstrip": false,
|
576 |
+
"single_word": false,
|
577 |
+
"special": true
|
578 |
+
},
|
579 |
+
"72": {
|
580 |
+
"content": "<[PLHD72_never_used]>",
|
581 |
+
"lstrip": false,
|
582 |
+
"normalized": false,
|
583 |
+
"rstrip": false,
|
584 |
+
"single_word": false,
|
585 |
+
"special": true
|
586 |
+
},
|
587 |
+
"73": {
|
588 |
+
"content": "<[PLHD73_never_used]>",
|
589 |
+
"lstrip": false,
|
590 |
+
"normalized": false,
|
591 |
+
"rstrip": false,
|
592 |
+
"single_word": false,
|
593 |
+
"special": true
|
594 |
+
},
|
595 |
+
"74": {
|
596 |
+
"content": "<[PLHD74_never_used]>",
|
597 |
+
"lstrip": false,
|
598 |
+
"normalized": false,
|
599 |
+
"rstrip": false,
|
600 |
+
"single_word": false,
|
601 |
+
"special": true
|
602 |
+
},
|
603 |
+
"75": {
|
604 |
+
"content": "<[PLHD75_never_used]>",
|
605 |
+
"lstrip": false,
|
606 |
+
"normalized": false,
|
607 |
+
"rstrip": false,
|
608 |
+
"single_word": false,
|
609 |
+
"special": true
|
610 |
+
},
|
611 |
+
"76": {
|
612 |
+
"content": "<[PLHD76_never_used]>",
|
613 |
+
"lstrip": false,
|
614 |
+
"normalized": false,
|
615 |
+
"rstrip": false,
|
616 |
+
"single_word": false,
|
617 |
+
"special": true
|
618 |
+
},
|
619 |
+
"77": {
|
620 |
+
"content": "<[PLHD77_never_used]>",
|
621 |
+
"lstrip": false,
|
622 |
+
"normalized": false,
|
623 |
+
"rstrip": false,
|
624 |
+
"single_word": false,
|
625 |
+
"special": true
|
626 |
+
},
|
627 |
+
"78": {
|
628 |
+
"content": "<[PLHD78_never_used]>",
|
629 |
+
"lstrip": false,
|
630 |
+
"normalized": false,
|
631 |
+
"rstrip": false,
|
632 |
+
"single_word": false,
|
633 |
+
"special": true
|
634 |
+
},
|
635 |
+
"79": {
|
636 |
+
"content": "<[PLHD79_never_used]>",
|
637 |
+
"lstrip": false,
|
638 |
+
"normalized": false,
|
639 |
+
"rstrip": false,
|
640 |
+
"single_word": false,
|
641 |
+
"special": true
|
642 |
+
},
|
643 |
+
"80": {
|
644 |
+
"content": "<[PLHD80_never_used]>",
|
645 |
+
"lstrip": false,
|
646 |
+
"normalized": false,
|
647 |
+
"rstrip": false,
|
648 |
+
"single_word": false,
|
649 |
+
"special": true
|
650 |
+
},
|
651 |
+
"81": {
|
652 |
+
"content": "<[PLHD81_never_used]>",
|
653 |
+
"lstrip": false,
|
654 |
+
"normalized": false,
|
655 |
+
"rstrip": false,
|
656 |
+
"single_word": false,
|
657 |
+
"special": true
|
658 |
+
},
|
659 |
+
"82": {
|
660 |
+
"content": "<[PLHD82_never_used]>",
|
661 |
+
"lstrip": false,
|
662 |
+
"normalized": false,
|
663 |
+
"rstrip": false,
|
664 |
+
"single_word": false,
|
665 |
+
"special": true
|
666 |
+
},
|
667 |
+
"83": {
|
668 |
+
"content": "<[PLHD83_never_used]>",
|
669 |
+
"lstrip": false,
|
670 |
+
"normalized": false,
|
671 |
+
"rstrip": false,
|
672 |
+
"single_word": false,
|
673 |
+
"special": true
|
674 |
+
},
|
675 |
+
"84": {
|
676 |
+
"content": "<[PLHD84_never_used]>",
|
677 |
+
"lstrip": false,
|
678 |
+
"normalized": false,
|
679 |
+
"rstrip": false,
|
680 |
+
"single_word": false,
|
681 |
+
"special": true
|
682 |
+
},
|
683 |
+
"85": {
|
684 |
+
"content": "<[PLHD85_never_used]>",
|
685 |
+
"lstrip": false,
|
686 |
+
"normalized": false,
|
687 |
+
"rstrip": false,
|
688 |
+
"single_word": false,
|
689 |
+
"special": true
|
690 |
+
},
|
691 |
+
"86": {
|
692 |
+
"content": "<[PLHD86_never_used]>",
|
693 |
+
"lstrip": false,
|
694 |
+
"normalized": false,
|
695 |
+
"rstrip": false,
|
696 |
+
"single_word": false,
|
697 |
+
"special": true
|
698 |
+
},
|
699 |
+
"87": {
|
700 |
+
"content": "<[PLHD87_never_used]>",
|
701 |
+
"lstrip": false,
|
702 |
+
"normalized": false,
|
703 |
+
"rstrip": false,
|
704 |
+
"single_word": false,
|
705 |
+
"special": true
|
706 |
+
},
|
707 |
+
"88": {
|
708 |
+
"content": "<[PLHD88_never_used]>",
|
709 |
+
"lstrip": false,
|
710 |
+
"normalized": false,
|
711 |
+
"rstrip": false,
|
712 |
+
"single_word": false,
|
713 |
+
"special": true
|
714 |
+
},
|
715 |
+
"89": {
|
716 |
+
"content": "<[PLHD89_never_used]>",
|
717 |
+
"lstrip": false,
|
718 |
+
"normalized": false,
|
719 |
+
"rstrip": false,
|
720 |
+
"single_word": false,
|
721 |
+
"special": true
|
722 |
+
},
|
723 |
+
"90": {
|
724 |
+
"content": "<[PLHD90_never_used]>",
|
725 |
+
"lstrip": false,
|
726 |
+
"normalized": false,
|
727 |
+
"rstrip": false,
|
728 |
+
"single_word": false,
|
729 |
+
"special": true
|
730 |
+
},
|
731 |
+
"91": {
|
732 |
+
"content": "<[PLHD91_never_used]>",
|
733 |
+
"lstrip": false,
|
734 |
+
"normalized": false,
|
735 |
+
"rstrip": false,
|
736 |
+
"single_word": false,
|
737 |
+
"special": true
|
738 |
+
},
|
739 |
+
"92": {
|
740 |
+
"content": "<[PLHD92_never_used]>",
|
741 |
+
"lstrip": false,
|
742 |
+
"normalized": false,
|
743 |
+
"rstrip": false,
|
744 |
+
"single_word": false,
|
745 |
+
"special": true
|
746 |
+
},
|
747 |
+
"93": {
|
748 |
+
"content": "<[PLHD93_never_used]>",
|
749 |
+
"lstrip": false,
|
750 |
+
"normalized": false,
|
751 |
+
"rstrip": false,
|
752 |
+
"single_word": false,
|
753 |
+
"special": true
|
754 |
+
},
|
755 |
+
"94": {
|
756 |
+
"content": "<[PLHD94_never_used]>",
|
757 |
+
"lstrip": false,
|
758 |
+
"normalized": false,
|
759 |
+
"rstrip": false,
|
760 |
+
"single_word": false,
|
761 |
+
"special": true
|
762 |
+
},
|
763 |
+
"95": {
|
764 |
+
"content": "<[PLHD95_never_used]>",
|
765 |
+
"lstrip": false,
|
766 |
+
"normalized": false,
|
767 |
+
"rstrip": false,
|
768 |
+
"single_word": false,
|
769 |
+
"special": true
|
770 |
+
},
|
771 |
+
"96": {
|
772 |
+
"content": "<[PLHD96_never_used]>",
|
773 |
+
"lstrip": false,
|
774 |
+
"normalized": false,
|
775 |
+
"rstrip": false,
|
776 |
+
"single_word": false,
|
777 |
+
"special": true
|
778 |
+
},
|
779 |
+
"97": {
|
780 |
+
"content": "<[PLHD97_never_used]>",
|
781 |
+
"lstrip": false,
|
782 |
+
"normalized": false,
|
783 |
+
"rstrip": false,
|
784 |
+
"single_word": false,
|
785 |
+
"special": true
|
786 |
+
},
|
787 |
+
"98": {
|
788 |
+
"content": "<[PLHD98_never_used]>",
|
789 |
+
"lstrip": false,
|
790 |
+
"normalized": false,
|
791 |
+
"rstrip": false,
|
792 |
+
"single_word": false,
|
793 |
+
"special": true
|
794 |
+
},
|
795 |
+
"99": {
|
796 |
+
"content": "<[PLHD99_never_used]>",
|
797 |
+
"lstrip": false,
|
798 |
+
"normalized": false,
|
799 |
+
"rstrip": false,
|
800 |
+
"single_word": false,
|
801 |
+
"special": true
|
802 |
+
},
|
803 |
+
"100": {
|
804 |
+
"content": "<[PLHD100_never_used]>",
|
805 |
+
"lstrip": false,
|
806 |
+
"normalized": false,
|
807 |
+
"rstrip": false,
|
808 |
+
"single_word": false,
|
809 |
+
"special": true
|
810 |
+
},
|
811 |
+
"101": {
|
812 |
+
"content": "<[PLHD101_never_used]>",
|
813 |
+
"lstrip": false,
|
814 |
+
"normalized": false,
|
815 |
+
"rstrip": false,
|
816 |
+
"single_word": false,
|
817 |
+
"special": true
|
818 |
+
},
|
819 |
+
"102": {
|
820 |
+
"content": "<[PLHD102_never_used]>",
|
821 |
+
"lstrip": false,
|
822 |
+
"normalized": false,
|
823 |
+
"rstrip": false,
|
824 |
+
"single_word": false,
|
825 |
+
"special": true
|
826 |
+
},
|
827 |
+
"103": {
|
828 |
+
"content": "<[PLHD103_never_used]>",
|
829 |
+
"lstrip": false,
|
830 |
+
"normalized": false,
|
831 |
+
"rstrip": false,
|
832 |
+
"single_word": false,
|
833 |
+
"special": true
|
834 |
+
},
|
835 |
+
"104": {
|
836 |
+
"content": "<[PLHD104_never_used]>",
|
837 |
+
"lstrip": false,
|
838 |
+
"normalized": false,
|
839 |
+
"rstrip": false,
|
840 |
+
"single_word": false,
|
841 |
+
"special": true
|
842 |
+
},
|
843 |
+
"105": {
|
844 |
+
"content": "<[PLHD105_never_used]>",
|
845 |
+
"lstrip": false,
|
846 |
+
"normalized": false,
|
847 |
+
"rstrip": false,
|
848 |
+
"single_word": false,
|
849 |
+
"special": true
|
850 |
+
},
|
851 |
+
"106": {
|
852 |
+
"content": "<[PLHD106_never_used]>",
|
853 |
+
"lstrip": false,
|
854 |
+
"normalized": false,
|
855 |
+
"rstrip": false,
|
856 |
+
"single_word": false,
|
857 |
+
"special": true
|
858 |
+
},
|
859 |
+
"107": {
|
860 |
+
"content": "<[PLHD107_never_used]>",
|
861 |
+
"lstrip": false,
|
862 |
+
"normalized": false,
|
863 |
+
"rstrip": false,
|
864 |
+
"single_word": false,
|
865 |
+
"special": true
|
866 |
+
},
|
867 |
+
"108": {
|
868 |
+
"content": "<[PLHD108_never_used]>",
|
869 |
+
"lstrip": false,
|
870 |
+
"normalized": false,
|
871 |
+
"rstrip": false,
|
872 |
+
"single_word": false,
|
873 |
+
"special": true
|
874 |
+
},
|
875 |
+
"109": {
|
876 |
+
"content": "<[PLHD109_never_used]>",
|
877 |
+
"lstrip": false,
|
878 |
+
"normalized": false,
|
879 |
+
"rstrip": false,
|
880 |
+
"single_word": false,
|
881 |
+
"special": true
|
882 |
+
},
|
883 |
+
"110": {
|
884 |
+
"content": "<[PLHD110_never_used]>",
|
885 |
+
"lstrip": false,
|
886 |
+
"normalized": false,
|
887 |
+
"rstrip": false,
|
888 |
+
"single_word": false,
|
889 |
+
"special": true
|
890 |
+
},
|
891 |
+
"111": {
|
892 |
+
"content": "<[PLHD111_never_used]>",
|
893 |
+
"lstrip": false,
|
894 |
+
"normalized": false,
|
895 |
+
"rstrip": false,
|
896 |
+
"single_word": false,
|
897 |
+
"special": true
|
898 |
+
},
|
899 |
+
"112": {
|
900 |
+
"content": "<[PLHD112_never_used]>",
|
901 |
+
"lstrip": false,
|
902 |
+
"normalized": false,
|
903 |
+
"rstrip": false,
|
904 |
+
"single_word": false,
|
905 |
+
"special": true
|
906 |
+
},
|
907 |
+
"113": {
|
908 |
+
"content": "<[PLHD113_never_used]>",
|
909 |
+
"lstrip": false,
|
910 |
+
"normalized": false,
|
911 |
+
"rstrip": false,
|
912 |
+
"single_word": false,
|
913 |
+
"special": true
|
914 |
+
},
|
915 |
+
"114": {
|
916 |
+
"content": "<[PLHD114_never_used]>",
|
917 |
+
"lstrip": false,
|
918 |
+
"normalized": false,
|
919 |
+
"rstrip": false,
|
920 |
+
"single_word": false,
|
921 |
+
"special": true
|
922 |
+
},
|
923 |
+
"115": {
|
924 |
+
"content": "<[PLHD115_never_used]>",
|
925 |
+
"lstrip": false,
|
926 |
+
"normalized": false,
|
927 |
+
"rstrip": false,
|
928 |
+
"single_word": false,
|
929 |
+
"special": true
|
930 |
+
},
|
931 |
+
"116": {
|
932 |
+
"content": "<[PLHD116_never_used]>",
|
933 |
+
"lstrip": false,
|
934 |
+
"normalized": false,
|
935 |
+
"rstrip": false,
|
936 |
+
"single_word": false,
|
937 |
+
"special": true
|
938 |
+
},
|
939 |
+
"117": {
|
940 |
+
"content": "<[PLHD117_never_used]>",
|
941 |
+
"lstrip": false,
|
942 |
+
"normalized": false,
|
943 |
+
"rstrip": false,
|
944 |
+
"single_word": false,
|
945 |
+
"special": true
|
946 |
+
},
|
947 |
+
"118": {
|
948 |
+
"content": "<[PLHD118_never_used]>",
|
949 |
+
"lstrip": false,
|
950 |
+
"normalized": false,
|
951 |
+
"rstrip": false,
|
952 |
+
"single_word": false,
|
953 |
+
"special": true
|
954 |
+
},
|
955 |
+
"119": {
|
956 |
+
"content": "<[PLHD119_never_used]>",
|
957 |
+
"lstrip": false,
|
958 |
+
"normalized": false,
|
959 |
+
"rstrip": false,
|
960 |
+
"single_word": false,
|
961 |
+
"special": true
|
962 |
+
},
|
963 |
+
"120": {
|
964 |
+
"content": "<[PLHD120_never_used]>",
|
965 |
+
"lstrip": false,
|
966 |
+
"normalized": false,
|
967 |
+
"rstrip": false,
|
968 |
+
"single_word": false,
|
969 |
+
"special": true
|
970 |
+
},
|
971 |
+
"121": {
|
972 |
+
"content": "<[PLHD121_never_used]>",
|
973 |
+
"lstrip": false,
|
974 |
+
"normalized": false,
|
975 |
+
"rstrip": false,
|
976 |
+
"single_word": false,
|
977 |
+
"special": true
|
978 |
+
},
|
979 |
+
"122": {
|
980 |
+
"content": "<[PLHD122_never_used]>",
|
981 |
+
"lstrip": false,
|
982 |
+
"normalized": false,
|
983 |
+
"rstrip": false,
|
984 |
+
"single_word": false,
|
985 |
+
"special": true
|
986 |
+
},
|
987 |
+
"123": {
|
988 |
+
"content": "<[PLHD123_never_used]>",
|
989 |
+
"lstrip": false,
|
990 |
+
"normalized": false,
|
991 |
+
"rstrip": false,
|
992 |
+
"single_word": false,
|
993 |
+
"special": true
|
994 |
+
},
|
995 |
+
"124": {
|
996 |
+
"content": "<[PLHD124_never_used]>",
|
997 |
+
"lstrip": false,
|
998 |
+
"normalized": false,
|
999 |
+
"rstrip": false,
|
1000 |
+
"single_word": false,
|
1001 |
+
"special": true
|
1002 |
+
},
|
1003 |
+
"125": {
|
1004 |
+
"content": "<[PLHD125_never_used]>",
|
1005 |
+
"lstrip": false,
|
1006 |
+
"normalized": false,
|
1007 |
+
"rstrip": false,
|
1008 |
+
"single_word": false,
|
1009 |
+
"special": true
|
1010 |
+
},
|
1011 |
+
"126": {
|
1012 |
+
"content": "<[PLHD126_never_used]>",
|
1013 |
+
"lstrip": false,
|
1014 |
+
"normalized": false,
|
1015 |
+
"rstrip": false,
|
1016 |
+
"single_word": false,
|
1017 |
+
"special": true
|
1018 |
+
},
|
1019 |
+
"127": {
|
1020 |
+
"content": "<[PLHD127_never_used]>",
|
1021 |
+
"lstrip": false,
|
1022 |
+
"normalized": false,
|
1023 |
+
"rstrip": false,
|
1024 |
+
"single_word": false,
|
1025 |
+
"special": true
|
1026 |
+
}
|
1027 |
+
},
|
1028 |
+
"bos_token": "<seed:bos>",
|
1029 |
+
"clean_up_tokenization_spaces": false,
|
1030 |
+
"eos_token": "<seed:eos>",
|
1031 |
+
"extra_special_tokens": {},
|
1032 |
+
"model_max_length": 1000000000000000019884624838656,
|
1033 |
+
"pad_token": "<seed:pad>",
|
1034 |
+
"tokenizer_class": "PreTrainedTokenizerFast"
|
1035 |
+
}
|