pstjohn committed
Commit b0c46be · verified · 1 Parent(s): d66fc85

Upload folder using huggingface_hub

Files changed (7)
  1. README.md +33 -3
  2. config.json +39 -0
  3. esm_nv.py +374 -0
  4. model.safetensors +3 -0
  5. special_tokens_map.json +7 -0
  6. tokenizer_config.json +53 -0
  7. vocab.txt +33 -0
README.md CHANGED
@@ -1,3 +1,33 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: mit
+ widget:
+ - text: "MQIFVKTLTGKTITLEVEPS<mask>TIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG"
+ ---
+
+ ## ESM-2 (TransformerEngine-optimized)
+
+ This version of the ESM-2 model is optimized with NVIDIA's
+ [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) library. It is based on the
+ [original ESM-2 model](https://huggingface.co/facebook/esm2_t48_15B_UR50D) from Facebook Research,
+ and has identical weights and outputs to within numerical precision.
+
+ ESM-2 is a state-of-the-art protein model trained with a masked language modelling objective. It is
+ suitable for fine-tuning on a wide range of tasks that take protein sequences as input. For detailed
+ information on the model architecture and training data, please refer to the [accompanying
+ paper](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v2). You may also be interested in
+ the demo notebooks
+ ([PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb),
+ [TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb)),
+ which demonstrate how to fine-tune ESM-2 models on your tasks of interest.
+
+ Several ESM-2 checkpoints of varying sizes are available on the Hub. Larger models generally achieve
+ somewhat better accuracy but require much more memory and time to train:
+
+ | Checkpoint name | Num layers | Num parameters |
+ |-----------------|------------|----------------|
+ | [esm2_t48_15B_UR50D](https://huggingface.co/nvidia/esm2_t48_15B_UR50D) | 48 | 15B |
+ | [esm2_t36_3B_UR50D](https://huggingface.co/nvidia/esm2_t36_3B_UR50D) | 36 | 3B |
+ | [esm2_t33_650M_UR50D](https://huggingface.co/nvidia/esm2_t33_650M_UR50D) | 33 | 650M |
+ | [esm2_t30_150M_UR50D](https://huggingface.co/nvidia/esm2_t30_150M_UR50D) | 30 | 150M |
+ | [esm2_t12_35M_UR50D](https://huggingface.co/nvidia/esm2_t12_35M_UR50D) | 12 | 35M |
+ | [esm2_t6_8M_UR50D](https://huggingface.co/nvidia/esm2_t6_8M_UR50D) | 6 | 8M |
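
As a quick illustration of the card above, here is a minimal loading sketch for one of the listed
checkpoints. It assumes the `nvidia/esm2_t6_8M_UR50D` checkpoint from the table, a CUDA device, and
an environment with TransformerEngine installed; `trust_remote_code=True` is needed so that the
`auto_map` entries in `config.json` resolve to the `NVEsm*` classes defined in `esm_nv.py`.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

# Any checkpoint from the table above works; the 8M model is used here as an example.
checkpoint = "nvidia/esm2_t6_8M_UR50D"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForMaskedLM.from_pretrained(checkpoint, trust_remote_code=True).cuda().eval()

sequence = "MQIFVKTLTGKTITLEVEPS<mask>TIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG"
inputs = {k: v.cuda() for k, v in tokenizer(sequence, return_tensors="pt").items()}

with torch.no_grad():
    logits = model(**inputs).logits

# Report the highest-scoring amino acid at the masked position.
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.convert_ids_to_tokens(int(logits[0, mask_pos].argmax())))
```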
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "architectures": [
+     "NVEsmForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "attn_input_format": "bshd",
+   "auto_map": {
+     "AutoConfig": "esm_nv.NVEsmConfig",
+     "AutoModel": "esm_nv.NVEsmModel",
+     "AutoModelForMaskedLM": "esm_nv.NVEsmForMaskedLM"
+   },
+   "classifier_dropout": null,
+   "emb_layer_norm_before": false,
+   "encoder_activation": "gelu",
+   "esmfold_config": null,
+   "fuse_qkv_params": true,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 320,
+   "initializer_range": 0.02,
+   "intermediate_size": 1280,
+   "is_folding_model": false,
+   "layer_norm_eps": 1e-05,
+   "mask_token_id": 32,
+   "max_position_embeddings": 1026,
+   "micro_batch_size": null,
+   "model_type": "nv_esm",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "rotary",
+   "qkv_weight_interleaved": true,
+   "token_dropout": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.0.dev0",
+   "use_cache": true,
+   "vocab_list": null,
+   "vocab_size": 33
+ }
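
The TransformerEngine-specific fields above (`attn_input_format`, `fuse_qkv_params`,
`qkv_weight_interleaved`, `micro_batch_size`, `encoder_activation`) correspond to the extra
arguments that `NVEsmConfig` adds on top of the stock `EsmConfig`. As a hedged sketch (assuming
`esm_nv.py` from this repository is importable), an equivalent config object can be built directly:

```python
from esm_nv import NVEsmConfig  # module shipped in this repository

config = NVEsmConfig(
    # Standard ESM-2 architecture fields, matching config.json above.
    hidden_size=320,
    intermediate_size=1280,
    num_hidden_layers=6,
    num_attention_heads=20,
    max_position_embeddings=1026,
    position_embedding_type="rotary",
    token_dropout=True,
    # TransformerEngine-specific options added by NVEsmConfig.
    attn_input_format="bshd",
    fuse_qkv_params=True,
    qkv_weight_interleaved=True,
    micro_batch_size=None,
    encoder_activation="gelu",
)
assert config.model_type == "nv_esm"
```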
esm_nv.py ADDED
@@ -0,0 +1,374 @@
+ # coding=utf-8
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: LicenseRef-Apache2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """TransformerEngine-optimized ESM model. Adapted from `modeling_esm.py` in
+ huggingface/transformers."""
+
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.utils.checkpoint
+ import transformer_engine.pytorch
+ from torch import nn
+ from torch.nn import CrossEntropyLoss
+ from transformer_engine.pytorch.attention.rope import RotaryPositionEmbedding
+ from transformers.modeling_outputs import (
+     BaseModelOutput,
+     BaseModelOutputWithPooling,
+     BaseModelOutputWithPoolingAndCrossAttentions,
+     MaskedLMOutput,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.models.esm.configuration_esm import EsmConfig
+ from transformers.models.esm.modeling_esm import EsmEmbeddings, EsmPooler
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class NVEsmConfig(EsmConfig):
+     model_type: str = "nv_esm"
+
+     def __init__(
+         self,
+         qkv_weight_interleaved: bool = True,
+         encoder_activation: str = "gelu",
+         attn_input_format: str = "bshd",
+         fuse_qkv_params: bool = True,
+         micro_batch_size: Optional[int] = None,
+         **kwargs,
+     ):
+         """Initialize the NVEsmConfig with additional TE-related config options.
+
+         Args:
+             qkv_weight_interleaved: Whether to interleave the qkv weights. If set to `False`, the
+                 QKV weight is interpreted as a concatenation of query, key, and value weights along
+                 the `0th` dimension. The default interpretation is that the individual `q`, `k`, and
+                 `v` weights for each attention head are interleaved. This parameter is set to
+                 `False` when using :attr:`fuse_qkv_params=False`.
+             encoder_activation: The activation function to use in the encoder.
+             attn_input_format: The input format to use for the attention. This controls
+                 whether the dimensions of the intermediate hidden states are 'batch first'
+                 ('bshd') or 'sequence first' ('sbhd'). `s` stands for the sequence length,
+                 `b` batch size, `h` the number of heads, `d` head size. Note that these
+                 formats are very closely related to the `qkv_format` in the
+                 `MultiHeadAttention` and `DotProductAttention` modules.
+             fuse_qkv_params: Whether to fuse the qkv parameters. If set to `True`, the
+                 `TransformerLayer` module exposes a single fused parameter for query-key-value.
+                 This enables optimizations such as QKV fusion without concatenations/splits and
+                 also enables the argument `fuse_wgrad_accumulation`.
+             micro_batch_size: The micro batch size to use for the attention. This is needed for
+                 JIT warmup, a technique where JIT-fused functions are warmed up before training to
+                 ensure the same kernels are used for the forward propagation and activation
+                 recompute phases.
+             **kwargs: Additional config options to pass to EsmConfig.
+         """
+         super().__init__(**kwargs)
+         # Additional TE-related config options.
+         self.qkv_weight_interleaved = qkv_weight_interleaved
+         self.encoder_activation = encoder_activation
+         self.attn_input_format = attn_input_format
+         self.fuse_qkv_params = fuse_qkv_params
+         self.micro_batch_size = micro_batch_size
+
+
+ class NVEsmEncoder(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.layers = nn.ModuleList(
+             [
+                 transformer_engine.pytorch.TransformerLayer(
+                     hidden_size=config.hidden_size,
+                     ffn_hidden_size=config.intermediate_size,
+                     num_attention_heads=config.num_attention_heads,
+                     layernorm_epsilon=config.layer_norm_eps,
+                     hidden_dropout=config.hidden_dropout_prob,
+                     attention_dropout=config.attention_probs_dropout_prob,
+                     qkv_weight_interleaved=config.qkv_weight_interleaved,
+                     layer_number=i + 1,
+                     layer_type="encoder",
+                     self_attn_mask_type="padding",
+                     activation=config.encoder_activation,
+                     attn_input_format=config.attn_input_format,
+                     seq_length=config.max_length,
+                     micro_batch_size=config.micro_batch_size,
+                     num_gqa_groups=config.num_attention_heads,
+                     fuse_qkv_params=config.fuse_qkv_params,
+                     params_dtype=config.torch_dtype,
+                 )
+                 for i in range(config.num_hidden_layers)
+             ]
+         )
+         self.emb_layer_norm_after = transformer_engine.pytorch.LayerNorm(
+             config.hidden_size, eps=config.layer_norm_eps
+         )
+         if config.position_embedding_type == "rotary":
+             self.rotary_embeddings = RotaryPositionEmbedding(
+                 config.hidden_size // config.num_attention_heads
+             )
+             self.te_rope_emb = self.rotary_embeddings(
+                 max_seq_len=config.max_position_embeddings
+             ).cuda()
+         else:
+             self.te_rope_emb = None
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         output_hidden_states=False,
+     ):
+         all_hidden_states = () if output_hidden_states else None
+
+         for layer_module in self.layers:
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             hidden_states = layer_module(
+                 hidden_states,
+                 attention_mask,
+                 rotary_pos_emb=self.te_rope_emb,
+             )
+
+         hidden_states = self.emb_layer_norm_after(hidden_states)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         return BaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=all_hidden_states,
+         )
+
+
+ class NVEsmPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and
+     loading pretrained models.
+     """
+
+     config_class = NVEsmConfig
+     base_model_prefix = "esm"
+     supports_gradient_checkpointing = False
+     _no_split_modules = [
+         "TransformerLayer",
+         "EsmEmbeddings",
+     ]
+
+
+ class NVEsmModel(NVEsmPreTrainedModel):
+     """The ESM encoder-only protein language model.
+
+     This model uses NVIDIA's TransformerEngine to optimize attention layer training and inference.
+     """
+
+     def __init__(self, config, add_pooling_layer=True):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = EsmEmbeddings(config)
+         self.encoder = NVEsmEncoder(config)
+         self.pooler = EsmPooler(config) if add_pooling_layer else None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings.word_embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings.word_embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         output_hidden_states: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+         r"""
+         attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in
+             `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             Note that this mask is inverted when it is passed to TransformerEngine, which expects a
+             boolean mask where 1s are masked and 0s are not masked.
+         """
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError(
+                 "You cannot specify both input_ids and inputs_embeds at the same time"
+             )
+         elif input_ids is not None:
+             self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+             input_shape = input_ids.size()
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         batch_size, seq_length = input_shape
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+         if attention_mask is None:
+             attention_mask = torch.ones((batch_size, seq_length), device=device)
+
+         # We can provide a self-attention mask of dimensions
+         # [batch_size, from_seq_length, to_seq_length] ourselves, in which case we just need to
+         # make it broadcastable to all heads.
+         extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
+             attention_mask, input_shape
+         )
+
+         # TE expects a boolean attention mask, where 1s are masked and 0s are not masked
+         extended_attention_mask = extended_attention_mask < -1
+
+         # Prepare head mask if needed
+         # 1.0 in head_mask indicates we keep the head
+         # attention_probs has shape bsz x n_heads x N x N
+         # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+         # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+         embedding_output = self.embeddings(
+             input_ids=input_ids,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+         )
+         encoder_outputs = self.encoder(
+             embedding_output,
+             attention_mask=extended_attention_mask,
+             output_hidden_states=output_hidden_states,
+         )
+         sequence_output = encoder_outputs[0]
+         pooled_output = (
+             self.pooler(sequence_output) if self.pooler is not None else None
+         )
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=sequence_output,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+         )
+
+
+ class NVEsmForMaskedLM(NVEsmPreTrainedModel):
+     _tied_weights_keys = ["lm_head.decoder.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+
+         if config.is_decoder:
+             logger.warning(
+                 "If you want to use `NVEsmForMaskedLM` make sure `config.is_decoder=False` for "
+                 "bi-directional self-attention."
+             )
+
+         self.esm = NVEsmModel(config, add_pooling_layer=False)
+         self.lm_head = NVEsmLMHead(config)
+
+         self.init_weights()
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head.decoder
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head.decoder = new_embeddings
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+     ) -> Union[Tuple, MaskedLMOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should be in
+             `[-100, 0, ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with
+             indices set to `-100` are ignored (masked); the loss is only computed for tokens with
+             labels in `[0, ..., config.vocab_size]`.
+         """
+         outputs = self.esm(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             inputs_embeds=inputs_embeds,
+             output_hidden_states=output_hidden_states,
+         )
+         sequence_output = outputs[0]
+         prediction_scores = self.lm_head(sequence_output)
+
+         masked_lm_loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+
+             labels = labels.to(prediction_scores.device)
+             masked_lm_loss = loss_fct(
+                 prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
+             )
+
+         return MaskedLMOutput(
+             loss=masked_lm_loss,
+             logits=prediction_scores,
+             hidden_states=outputs.hidden_states,
+         )
+
+     def predict_contacts(self, tokens, attention_mask):
+         return self.esm.predict_contacts(tokens, attention_mask=attention_mask)
+
+
+ class NVEsmLMHead(nn.Module):
+     """ESM head for masked language modeling using TransformerEngine."""
+
+     def __init__(self, config):
+         super().__init__()
+         self.dense = transformer_engine.pytorch.Linear(
+             config.hidden_size, config.hidden_size
+         )
+
+         self.decoder = transformer_engine.pytorch.LayerNormLinear(
+             config.hidden_size,
+             config.vocab_size,
+             bias=True,
+             eps=config.layer_norm_eps,
+         )
+
+     def forward(self, features, **kwargs):
+         x = self.dense(features)
+         x = torch.nn.functional.gelu(x)
+         x = self.decoder(x)
+         return x
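
Since `NVEsmForMaskedLM.forward` computes the masked-LM cross-entropy whenever `labels` are passed
(positions set to `-100` are ignored), a fine-tuning step reduces to a standard forward/backward
pass. A hedged sketch, assuming the 8M checkpoint from the README table, a CUDA device, and
TransformerEngine installed:

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer, DataCollatorForLanguageModeling

checkpoint = "nvidia/esm2_t6_8M_UR50D"  # example choice from the README table
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForMaskedLM.from_pretrained(checkpoint, trust_remote_code=True).cuda().train()

# Randomly mask 15% of residues and build labels (-100 at unmasked positions).
collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15)
batch = collator([tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")])  # hypothetical sequence
batch = {k: v.cuda() for k, v in batch.items()}

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
outputs = model(**batch)   # forward() returns MaskedLMOutput with .loss when labels are present
outputs.loss.backward()
optimizer.step()
```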
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92a3baa930505a6140840261184d33f29f18c607ab8d7029b0a7101a72a12d2d
+ size 30062185
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "<cls>",
+   "eos_token": "<eos>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<cls>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32": {
+       "content": "<mask>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<cls>",
+   "eos_token": "<eos>",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "tokenizer_class": "EsmTokenizer",
+   "unk_token": "<unk>"
+ }
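
The `added_tokens_decoder` ids above line up with `config.json` (`pad_token_id: 1`,
`mask_token_id: 32`) and with the token positions in `vocab.txt` below. A small sketch of how the
stock `EsmTokenizer` uses them (checkpoint name taken from the README table as an example):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("nvidia/esm2_t6_8M_UR50D")

# EsmTokenizer wraps each sequence as <cls> ... <eos>.
ids = tokenizer("MKTAYIAK")["input_ids"]
print(ids[0], ids[-1])          # 0 (<cls>) and 2 (<eos>)
print(tokenizer.pad_token_id)   # 1, matching pad_token_id in config.json
print(tokenizer.mask_token_id)  # 32, matching mask_token_id in config.json
```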
vocab.txt ADDED
@@ -0,0 +1,33 @@
+ <cls>
+ <pad>
+ <eos>
+ <unk>
+ L
+ A
+ G
+ V
+ S
+ E
+ R
+ T
+ I
+ D
+ P
+ K
+ Q
+ N
+ F
+ Y
+ M
+ H
+ W
+ C
+ X
+ B
+ U
+ Z
+ O
+ .
+ -
+ <null_1>
+ <mask>
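
`EsmTokenizer` assigns token ids by zero-based line position in `vocab.txt`, which is how `<pad>`
ends up as id 1 and `<mask>` as id 32 in the configs above. A minimal consistency check, assuming
the file has been downloaded locally:

```python
# Build the id mapping the same way EsmTokenizer does: one token per line, ids by position.
with open("vocab.txt") as f:
    vocab = {token.strip(): idx for idx, token in enumerate(f)}

assert vocab["<cls>"] == 0    # cls token
assert vocab["<pad>"] == 1    # pad_token_id in config.json
assert vocab["<eos>"] == 2
assert vocab["<mask>"] == 32  # mask_token_id in config.json
assert len(vocab) == 33       # vocab_size in config.json
```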