Upload tokenizer
Files changed:
- special_tokens_map.json  +6 -42
- tokenizer.json           +0 -0
- tokenizer_config.json    +0 -7
    	
special_tokens_map.json
CHANGED

@@ -1,46 +1,10 @@
 {
-  "bos_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
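A quick way to sanity-check this simplification is to load the uploaded files and print the resulting special tokens map: the plain-string form and the old expanded form should resolve to the same token texts. A minimal sketch, assuming the transformers library is installed and the files have been downloaded to a local directory named ./tokenizer (a hypothetical path):

# Minimal sketch: confirm the simplified special_tokens_map.json resolves to
# the expected token strings. "./tokenizer" is a hypothetical local path
# holding the uploaded files.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./tokenizer")

# Each entry should map directly to its token text, e.g. 'bos_token' -> '[CLS]'.
print(tokenizer.special_tokens_map)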
    	
tokenizer.json
CHANGED

The diff for this file is too large to render. See raw diff.
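Even though the diff is too large to render, tokenizer.json is the standard serialization consumed by the tokenizers library, so it can be inspected directly. A minimal sketch, assuming tokenizers is installed and the raw file has been downloaded into the working directory (a hypothetical location):

# Minimal sketch: load the serialized tokenizer directly with the
# `tokenizers` library. "tokenizer.json" is assumed to be in the working
# directory.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Encode a sample string and inspect the resulting pieces and ids.
encoding = tok.encode("Hello world")
print(encoding.tokens)
print(encoding.ids)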
    	
tokenizer_config.json
CHANGED

@@ -47,19 +47,12 @@
   "do_lower_case": false,
   "eos_token": "[SEP]",
   "mask_token": "[MASK]",
-  "max_length": 256,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "[SEP]",
   "sp_model_kwargs": {},
   "split_by_punct": false,
-  "stride": 0,
   "tokenizer_class": "DebertaV2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]",
   "vocab_type": "spm"
 }
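The dropped keys are not lost behavior: in transformers, max_length, stride, padding, and truncation are per-call arguments to the tokenizer rather than persistent configuration, while padding_side and truncation_side are instance attributes. A minimal sketch of passing the old values at call time, assuming the same hypothetical ./tokenizer directory as above:

# Minimal sketch: the removed config keys map onto per-call arguments and
# instance attributes of the tokenizer in `transformers`. "./tokenizer" is
# a hypothetical local path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./tokenizer")
tokenizer.padding_side = "right"      # was "padding_side": "right"
tokenizer.truncation_side = "right"   # was "truncation_side": "right"

batch = tokenizer(
    "An example sentence.",
    max_length=256,              # was "max_length": 256
    truncation="longest_first",  # was "truncation_strategy": "longest_first"
    stride=0,                    # was "stride": 0
    padding="max_length",        # pad up to max_length with "[PAD]"
)
print(batch["input_ids"])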

