Update tokenizer files
- tokenizer.json +1 -10
- tokenizer_config.json +2 -9
tokenizer.json
@@ -254,15 +254,6 @@
       "rstrip": false,
       "normalized": true,
       "special": false
-    },
-    {
-      "id": 50280,
-      "content": "<padding>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
     }
   ],
   "normalizer": {
@@ -250610,4 +250601,4 @@
       ]
     ]
   }
-}
+}
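A quick way to confirm the effect of the tokenizer.json change is to reload the tokenizer and check that the "<padding>" entry is no longer registered as an added token. The sketch below is a minimal check, assuming a transformers-compatible repo; the repository id is a placeholder, not the actual repo name.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the repository this commit belongs to.
tok = AutoTokenizer.from_pretrained("your-org/your-model")

# The added_tokens entry for id 50280 ("<padding>") was removed,
# so it should no longer appear in the added vocabulary.
added = tok.get_added_vocab()   # maps added-token strings to their ids
print("<padding>" in added)     # expected: False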
tokenizer_config.json
@@ -226,14 +226,6 @@
       "rstrip": false,
       "single_word": false,
       "special": false
-    },
-    "50280": {
-      "content": "<|padding|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "bos_token": "<|endoftext|>",
@@ -242,7 +234,8 @@
   "eos_token": "<|endoftext|>",
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "
+  "pad_token": "<|padding|>",
   "tokenizer_class": "GPTNeoXTokenizer",
   "unk_token": "<|endoftext|>"
 }
+
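After the tokenizer_config.json change, the padding token should resolve from the config entry. A minimal sanity check, again with a placeholder repo id and assuming the tokenizer loads through transformers:

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual repository.
tok = AutoTokenizer.from_pretrained("your-org/your-model")

# pad_token is now set in tokenizer_config.json.
print(tok.pad_token)     # expected: "<|padding|>"
print(tok.pad_token_id)  # whatever id "<|padding|>" maps to in the vocabulary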