| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def is_first_subword(self, tokens: Union[str, int, List[str], List[int]]) \
-> Union[bool, List[bool]]:
"""Whether the token is the first subword token in a list of subword tokens
Parameters
----------
tokens
The input tokens
Returns
-------
ret
Whether the token is the first subword token in a sequence of subword tokens
that construct the token
"""
if isinstance(tokens, str):
return tokens.startswith(self._meta_symbol)
elif isinstance(tokens, int):
return tokens in self._first_subword_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
if isinstance(tokens[0], str):
return [ele.startswith(self._meta_symbol) for ele in tokens]
elif isinstance(tokens[0], int):
return [ele in self._first_subword_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
|
Whether the token is the first subword token in a list of subword tokens
Parameters
----------
tokens
The input tokens
Returns
-------
ret
Whether the token is the first subword token in a sequence of subword tokens
that construct the token
|
is_first_subword
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/data/tokenizers/yttm.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/yttm.py
|
Apache-2.0
|
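A minimal standalone sketch of what the string branch of `is_first_subword` checks; the meta symbol '▁' (U+2581) and the example subwords are assumptions for illustration, not taken from the tokenizer itself.

# Illustration only: SentencePiece/YTTM-style subwords mark word starts with '▁'.
meta_symbol = '\u2581'
subwords = ['\u2581Hel', 'lo', '\u2581world']
print([tok.startswith(meta_symbol) for tok in subwords])
# [True, False, True]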
def __getstate__(self):
"""Support multiprocessing by making it pickleble"""
state = self.__dict__.copy()
state['_bpe'] = None
return state
|
Support multiprocessing by making it picklable
|
__getstate__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/data/tokenizers/yttm.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/yttm.py
|
Apache-2.0
|
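The same drop-the-unpicklable-handle pattern in a self-contained sketch; `LazyResource` is a hypothetical stand-in for the tokenizer, which re-creates its BPE object lazily after unpickling.

import pickle

class LazyResource:
    """Hypothetical class showing the __getstate__ pattern used above."""
    def __init__(self):
        self._bpe = object()       # stands in for the unpicklable BPE handle

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_bpe'] = None       # drop the handle so the object can be pickled
        return state

clone = pickle.loads(pickle.dumps(LazyResource()))
assert clone._bpe is None          # the real class rebuilds _bpe on demand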
def list_sources(embedding_name=None):
"""Get valid token embedding names and their pre-trained file names.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`source`) for the
specified token embedding name (`embedding_name`). If the text embedding name is set to
None, returns a dict mapping each valid token embedding name to a list of valid pre-trained
files (`source`).
"""
if embedding_name is not None:
embedding_name = embedding_name.lower()
if embedding_name == 'fasttext.bin':
return list(C.FAST_TEXT_BIN_SHA1.keys())
if embedding_name not in text_embedding_reg:
raise KeyError('Cannot find `embedding_name` {}. Use '
'`list_sources(embedding_name=None).keys()` to get all the valid '
'embedding names.'.format(embedding_name))
return list(text_embedding_reg[embedding_name].keys())
else:
return {embedding_name: list(embedding_cls.keys())
for embedding_name, embedding_cls in text_embedding_reg.items()}
|
Get valid token embedding names and their pre-trained file names.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`source`) for the
specified token embedding name (`embedding_name`). If the text embedding name is set to
None, returns a dict mapping each valid token embedding name to a list of valid pre-trained
files (`source`).
|
list_sources
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/embedding/embed_loader.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/embedding/embed_loader.py
|
Apache-2.0
|
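A hedged usage sketch of `list_sources`; it assumes the function is exported as `gluonnlp.embedding.list_sources` (as the `load_embeddings` docstring below suggests), and the registry keys shown in the comments are illustrative.

import gluonnlp

all_sources = gluonnlp.embedding.list_sources()          # dict: embedding name -> list of files
print(sorted(all_sources.keys()))                        # e.g. ['fasttext', 'glove', 'word2vec', ...] (assumed keys)

glove_files = gluonnlp.embedding.list_sources('glove')   # pretrained file names for one family
print(glove_files[:3])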
def load_embeddings(vocab=None, pretrained_name_or_dir='glove.6B.50d', unknown='<unk>',
unk_method=None):
"""Load pretrained word embeddings for building an embedding matrix for a given Vocab.
This function supports loading GloVe, Word2Vec and FastText word embeddings from remote sources.
You can also load your own embedding file (a txt file in Word2Vec or GloVe format) from a given file path.
GloVe: an unsupervised learning algorithm for obtaining vector representations for words.
Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and
the resulting representations showcase interesting linear substructures of the word vector
space. (Source from https://nlp.stanford.edu/projects/glove/)
Available sources:
['glove.42B.300d', 'glove.6B.100d', 'glove.6B.200d', 'glove.6B.300d', 'glove.6B.50d', \
'glove.840B.300d', 'glove.twitter.27B.100d', 'glove.twitter.27B.200d', \
'glove.twitter.27B.25d', 'glove.twitter.27B.50d']
Word2Vec: an unsupervised learning algorithm for obtaining vector representations for words.
Training is performed with continuous bag-of-words or skip-gram architecture for computing vector
representations of words.
Available sources:
['GoogleNews-vectors-negative300', 'freebase-vectors-skipgram1000', \
'freebase-vectors-skipgram1000-en']
FastText: an open-source, free, lightweight library that allows users to learn text
representations and text classifiers. It works on standard, generic hardware. Models can later
be reduced in size to even fit on mobile devices. (Source from https://fasttext.cc/)
Available sources:
['cc.af.300', ..., 'cc.en.300', ..., 'crawl-300d-2M', 'crawl-300d-2M-subword', \
'wiki-news-300d-1M', 'wiki-news-300d-1M-subword', \
'wiki.aa', ..., 'wiki.multi.ar', ..., 'wiki.zu']
Detailed sources can be found via `gluonnlp.embedding.list_sources('FastText')`
For 'wiki.multi' embedding:
Word Translation Without Parallel Data
Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herve Jegou.
https://arxiv.org/abs/1710.04087
Parameters
----------
vocab : gluonnlp.data.Vocab object, default None
A vocabulary on which an embedding matrix is built.
If `vocab` is `None`, then all tokens in the pretrained file will be used.
pretrained_name_or_dir : str, default 'glove.6B.50d'
A file path for a pretrained embedding file or the name of the pretrained token embedding file.
This method would first check if it is a file path.
If not, the method will load from cache or download.
unknown : str, default '<unk>'
To specify the unknown token in the pretrained file.
unk_method : Callable, default None
A function which receives `List[str]` and returns `numpy.ndarray`.
The input of the function is a list of words which are in the `vocab`,
but do not occur in the pretrained file.
And the function is aimed to return an embedding matrix for these words.
If `unk_method` is None, we generate vectors for these words
by sampling from a normal distribution with the same mean and std as the embedding matrix.
It is only useful when `vocab` is not `None`.
Returns
-------
If `vocab` is `None`
numpy.ndarray:
An embedding matrix in the pretrained file.
gluonnlp.data.Vocab:
The vocabulary in the pretrained file.
Otherwise,
numpy.ndarray:
An embedding matrix for the given vocabulary.
"""
assert isinstance(vocab, (Vocab, type(None))), "Only gluonnlp.data.Vocab is supported."
file_path = _check_and_get_path(pretrained_name_or_dir)
if file_path is None:
raise ValueError("Cannot recognize `{}`".format(pretrained_name_or_dir))
if file_path.endswith('.npz'):
matrix, result = _load_embedding_npz(file_path, vocab, unknown)
else:
matrix, result = _load_embedding_txt(file_path, vocab, unknown)
dim = matrix.shape[-1]
logging.info("Pre-trained embedding dim: {}".format(dim))
if vocab is None:
return matrix, result
else:
hit_flags = result
total_hits = sum(hit_flags)
logging.info("Found {} out of {} words in the pretrained embedding.".format(total_hits, len(vocab)))
if total_hits != len(vocab):
if unk_method is None:
found_vectors = matrix[hit_flags]
mean = np.mean(found_vectors, axis=0, keepdims=True)
std = np.std(found_vectors, axis=0, keepdims=True)
unfound_vec_num = len(vocab) - total_hits
r_vecs = np.random.randn(unfound_vec_num, dim).astype('float32') * std + mean
matrix[hit_flags == False] = r_vecs
else:
unk_idxs = (hit_flags == False).nonzero()[0]
matrix[hit_flags == False] = unk_method(vocab.to_tokens(unk_idxs))
return matrix
|
Load pretrained word embeddings for building an embedding matrix for a given Vocab.
This function supports loading GloVe, Word2Vec and FastText word embeddings from remote sources.
You can also load your own embedding file (a txt file in Word2Vec or GloVe format) from a given file path.
GloVe: an unsupervised learning algorithm for obtaining vector representations for words.
Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and
the resulting representations showcase interesting linear substructures of the word vector
space. (Source from https://nlp.stanford.edu/projects/glove/)
Available sources:
['glove.42B.300d', 'glove.6B.100d', 'glove.6B.200d', 'glove.6B.300d', 'glove.6B.50d', 'glove.840B.300d', 'glove.twitter.27B.100d', 'glove.twitter.27B.200d', 'glove.twitter.27B.25d', 'glove.twitter.27B.50d']
Word2Vec: an unsupervised learning algorithm for obtaining vector representations for words.
Training is performed with continuous bag-of-words or skip-gram architecture for computing vector
representations of words.
Available sources:
['GoogleNews-vectors-negative300', 'freebase-vectors-skipgram1000', 'freebase-vectors-skipgram1000-en']
FastText: an open-source, free, lightweight library that allows users to learn text
representations and text classifiers. It works on standard, generic hardware. Models can later
be reduced in size to even fit on mobile devices. (Source from https://fasttext.cc/)
Available sources:
['cc.af.300', ..., 'cc.en.300', ..., 'crawl-300d-2M', 'crawl-300d-2M-subword', 'wiki-news-300d-1M', 'wiki-news-300d-1M-subword', 'wiki.aa', ..., 'wiki.multi.ar', ..., 'wiki.zu']
Detailed sources can be found via `gluonnlp.embedding.list_sources('FastText')`
For 'wiki.multi' embedding:
Word Translation Without Parallel Data
Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herve Jegou.
https://arxiv.org/abs/1710.04087
Parameters
----------
vocab : gluonnlp.data.Vocab object, default None
A vocabulary on which an embedding matrix is built.
If `vocab` is `None`, then all tokens in the pretrained file will be used.
pretrained_name_or_dir : str, default 'glove.6B.50d'
A file path for a pretrained embedding file or the name of the pretrained token embedding file.
This method would first check if it is a file path.
If not, the method will load from cache or download.
unknown : str, default '<unk>'
To specify the unknown token in the pretrained file.
unk_method : Callable, default None
A function which receives `List[str]` and returns `numpy.ndarray`.
The input of the function is a list of words which are in the `vocab`,
but do not occur in the pretrained file.
And the function is aimed to return an embedding matrix for these words.
If `unk_method` is None, we generate vectors for these words
by sampling from a normal distribution with the same mean and std as the embedding matrix.
It is only useful when `vocab` is not `None`.
Returns
-------
If `vocab` is `None`
numpy.ndarray:
An embedding matrix in the pretrained file.
gluonnlp.data.Vocab:
The vocabulary in the pretrained file.
Otherwise,
numpy.ndarray:
An embedding matrix for the given vocabulary.
|
load_embeddings
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/embedding/embed_loader.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/embedding/embed_loader.py
|
Apache-2.0
|
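A hedged usage sketch for `load_embeddings`; the vocabulary tokens are made up, and passing a plain token list to `gluonnlp.data.Vocab` is an assumption about its constructor.

import gluonnlp
from gluonnlp.data import Vocab

vocab = Vocab(['hello', 'world', 'gluon'])                     # assumed constructor usage
matrix = gluonnlp.embedding.load_embeddings(vocab, 'glove.6B.50d')
print(matrix.shape)                                            # (len(vocab), 50)

# With vocab=None, the full pretrained matrix and its vocabulary are returned.
full_matrix, full_vocab = gluonnlp.embedding.load_embeddings(
    vocab=None, pretrained_name_or_dir='glove.6B.50d')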
def get_fasttext_model(model_name_or_dir='cc.en.300'):
""" Load fasttext model from the binaray file
This method will load fasttext model binaray file from a given file path or remote sources,
and return a `fasttext` model object. See `fasttext.cc` for more usage information.
Available sources:
['wiki-news-300d-1M-subword', 'crawl-300d-2M-subword', \
'cc.af.300', ..., 'cc.en.300', ..., 'wiki.aa', ..., 'wiki.en', ..., 'wiki.zu']
Detailed sources can be found via `gluonnlp.embedding.list_sources('FastText.bin')`
Parameters
----------
model_name_or_dir : str, default 'cc.en.300'
A file path for a FastText binary file or the name of the FastText model.
This method would first check if it is a file path.
If not, the method will load from cache or download.
Returns
-------
fasttext.FastText._FastText:
A FastText model based on `fasttext` package.
"""
if os.path.exists(model_name_or_dir):
file_path = model_name_or_dir
else:
source = model_name_or_dir
root_path = os.path.expanduser(os.path.join(get_home_dir(), 'embedding'))
embedding_dir = os.path.join(root_path, 'fasttext')
if source not in C.FAST_TEXT_BIN_SHA1:
raise ValueError('Cannot recognize {} for the bin file'.format(source))
file_name, file_hash = C.FAST_TEXT_BIN_SHA1[source]
file_path = _get_file_path('fasttext', file_name, file_hash)
return fasttext.load_model(file_path)
|
Load the fastText model from a binary file
This method loads a fastText binary model file from a given file path or from remote sources,
and return a `fasttext` model object. See `fasttext.cc` for more usage information.
Available sources:
['wiki-news-300d-1M-subword', 'crawl-300d-2M-subword', 'cc.af.300', ..., 'cc.en.300', ..., 'wiki.aa', ..., 'wiki.en', ..., 'wiki.zu']
Detailed sources can be found via `gluonnlp.embedding.list_sources('FastText.bin')`
Parameters
----------
model_name_or_dir : str, default 'cc.en.300'
A file path for a FastText binary file or the name of the FastText model.
This method would first check if it is a file path.
If not, the method will load from cache or download.
Returns
-------
fasttext.FastText._FastText:
A FastText model based on `fasttext` package.
|
get_fasttext_model
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/embedding/embed_loader.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/embedding/embed_loader.py
|
Apache-2.0
|
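A hedged usage sketch for `get_fasttext_model`; the import path is an assumption, the call requires the `fasttext` package, and the first run downloads a multi-gigabyte binary. `get_word_vector` is the standard fasttext model API.

from gluonnlp.embedding import get_fasttext_model   # assumed export path

ft_model = get_fasttext_model('cc.en.300')
vec = ft_model.get_word_vector('hello')             # 300-dimensional vector
print(vec.shape)                                     # (300,)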
def forward(self, data, valid_length):
"""
Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# Generate the full self-attention mask from the valid lengths
time_axis = 1 if self.layout == 'NT' else 0
attn_mask = gen_self_attn_mask(data, valid_length, dtype=self._dtype,
attn_type='full', layout=self.layout)
out = data
all_encodings_outputs = []
additional_outputs = []
for layer_idx in range(self._num_layers):
groups_id = layer_idx // self._num_layers_each_group
layer = self.all_encoder_groups[groups_id]
out, attention_weights = layer(out, attn_mask)
# out : [batch_size, seq_len, units]
# attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = npx.sequence_mask(out,
sequence_length=valid_length,
use_sequence_length=True,
axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = npx.sequence_mask(out, sequence_length=valid_length,
use_sequence_length=True,
axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs
|
Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
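For readers unfamiliar with `npx.sequence_mask`, here is a NumPy-only illustration of the masking applied along the time axis; it mirrors the semantics, not the MXNet implementation.

import numpy as np

batch_size, seq_length, units = 2, 4, 3
out = np.ones((batch_size, seq_length, units))                   # layout = 'NT'
valid_length = np.array([4, 2])

mask = np.arange(seq_length)[None, :] < valid_length[:, None]    # (batch_size, seq_length)
masked_out = out * mask[:, :, None]                              # zero out padded time steps
print(masked_out[1, :, 0])                                        # [1. 1. 0. 0.]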
def forward(self, inputs, token_types, valid_length=None):
"""Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units)
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_output
This is optional. Shape (batch_size, units)
"""
initial_embedding = self.get_initial_embedding(inputs, token_types)
# Projecting the embedding into units
prev_out = initial_embedding
if self.embed_size != self.units:
prev_out = self.embed_factorized_proj(prev_out)
outputs = []
if self._compute_layout != self._layout:
# Swap input to reflect the compute_layout
contextual_embeddings, additional_outputs = self.encoder(np.swapaxes(prev_out, 0, 1),
valid_length)
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(prev_out, valid_length)
outputs.append(contextual_embeddings)
if self.use_pooler:
pooled_out = self.apply_pooling(contextual_embeddings)
outputs.append(pooled_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0]
|
Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units)
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_output
This is optional. Shape (batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
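A hedged end-to-end sketch of calling this forward pass via the model-zoo helpers defined later in this file set; the token ids are made up, and 'google_albert_base_v2' follows the pretrained-ALBERT naming used in `get_pretrained_albert` below.

import mxnet as mx
from gluonnlp.models import get_backbone

model_cls, cfg, tokenizer, params_path, _ = get_backbone('google_albert_base_v2')
model = model_cls.from_cfg(cfg)
model.load_parameters(params_path)

inputs = mx.np.array([[2, 1151, 3]], dtype='int32')       # (batch_size, seq_length), made-up ids
token_types = mx.np.zeros_like(inputs)
valid_length = mx.np.array([3], dtype='int32')
contextual_embedding, pooled_out = model(inputs, token_types, valid_length)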
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The types of tokens. If it is None, it will be initialized as all zeros.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_embed)
- layout = 'TN'
Shape (seq_length, batch_size, C_embed)
"""
if self.layout == 'NT':
batch_axis, time_axis = 0, 1
else:
batch_axis, time_axis = 1, 0
embedding = self.word_embed(inputs)
if token_types is None:
token_types = np.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding = self.token_pos_embed(npx.arange_like(inputs, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings that consider the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The types of tokens. If it is None, it will be initialized as all zeros.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_embed)
- layout = 'TN'
Shape (seq_length, batch_size, C_embed)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
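A NumPy-only sketch of the sum computed above (word + token-type + positional embedding) for layout = 'NT'; the sizes are made up, and layer normalization plus dropout are omitted.

import numpy as np

vocab_size, num_types, max_length, c_embed = 10, 2, 8, 4
word_embed = np.random.randn(vocab_size, c_embed)
type_embed = np.random.randn(num_types, c_embed)
pos_embed = np.random.randn(max_length, c_embed)

inputs = np.array([[3, 7, 1]])                 # (batch_size=1, seq_length=3)
token_types = np.zeros_like(inputs)            # defaulted to all zeros

embedding = (word_embed[inputs]                # (1, 3, C_embed)
             + type_embed[token_types]
             + pos_embed[np.arange(inputs.shape[1])][None, :, :])
print(embedding.shape)                          # (1, 3, 4); layer norm and dropout omitted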
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning an ALBERT model.
It takes the first token of the whole sequence, which is [CLS].
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
pooled_out
Shape (batch_size, units)
"""
if self.layout == 'NT':
outputs = sequence[:, 0, :]
else:
outputs = sequence[0, :, :]
return self.pooler(outputs)
|
Generate the representation given the inputs.
This is used for pre-training or fine-tuning an ALBERT model.
It takes the first token of the whole sequence, which is [CLS].
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
pooled_out
Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
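A NumPy-only check that the two indexing branches above select the same [CLS] vectors under the 'NT' and 'TN' layouts.

import numpy as np

batch_size, seq_length, units = 2, 5, 3
seq_nt = np.random.randn(batch_size, seq_length, units)   # layout = 'NT'
seq_tn = np.swapaxes(seq_nt, 0, 1)                        # layout = 'TN'

cls_nt = seq_nt[:, 0, :]                                  # (batch_size, units)
cls_tn = seq_tn[0, :, :]                                  # (batch_size, units)
assert np.allclose(cls_nt, cls_tn)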
def from_cfg(cls, cfg, use_pooler=True, dtype=None) -> 'AlbertModel':
"""
Parameters
----------
cfg
use_pooler
Whether to use pooler
dtype
The dtype of the backbone model
Returns
-------
model
The created AlbertModel
"""
cfg = cls.get_cfg().clone_merge(cfg)
assert cfg.VERSION == 1, 'Wrong version!'
embed_initializer = mx.init.create(*cfg.INITIALIZER.embed)
weight_initializer = mx.init.create(*cfg.INITIALIZER.weight)
bias_initializer = mx.init.create(*cfg.INITIALIZER.bias)
if dtype is None:
dtype = cfg.MODEL.dtype
return cls(vocab_size=cfg.MODEL.vocab_size,
units=cfg.MODEL.units,
hidden_size=cfg.MODEL.hidden_size,
embed_size=cfg.MODEL.embed_size,
num_layers=cfg.MODEL.num_layers,
num_heads=cfg.MODEL.num_heads,
num_groups=cfg.MODEL.num_groups,
max_length=cfg.MODEL.max_length,
hidden_dropout_prob=cfg.MODEL.hidden_dropout_prob,
attention_dropout_prob=cfg.MODEL.attention_dropout_prob,
num_token_types=cfg.MODEL.num_token_types,
pos_embed_type=cfg.MODEL.pos_embed_type,
activation=cfg.MODEL.activation,
layer_norm_eps=cfg.MODEL.layer_norm_eps,
dtype=dtype,
layout=cfg.MODEL.layout,
embed_initializer=embed_initializer,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_pooler=use_pooler)
|
Parameters
----------
cfg
use_pooler
Whether to use pooler
dtype
The dtype of the backbone model
Returns
-------
model
The created AlbertModel
|
from_cfg
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units)
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, mlm_scores
|
Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units)
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
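A NumPy-only sketch of what `select_vectors_by_position` is expected to do here: gather the contextual vectors at the masked positions of each sequence. This illustrates the gather semantics, not the library implementation.

import numpy as np

batch_size, seq_length, units = 2, 6, 4
contextual = np.random.randn(batch_size, seq_length, units)   # layout = 'NT'
masked_positions = np.array([[1, 4],
                             [0, 5]])                         # (batch_size, num_masked_positions)

batch_idx = np.arange(batch_size)[:, None]                    # (batch_size, 1), broadcasts over positions
mlm_features = contextual[batch_idx, masked_positions]        # (batch_size, num_masked_positions, units)
print(mlm_features.shape)                                      # (2, 2, 4)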
def __init__(self, backbone_cfg,
weight_initializer=None,
bias_initializer=None):
"""
Parameters
----------
backbone_cfg
The cfg of the backbone model
weight_initializer
bias_initializer
"""
super().__init__()
self.backbone_model = AlbertModel.from_cfg(backbone_cfg)
if weight_initializer is None:
weight_initializer = self.backbone_model.weight_initializer
if bias_initializer is None:
bias_initializer = self.backbone_model.bias_initializer
# Construct sop_classifier for sentence order prediction
self.sop_classifier = nn.Dense(units=2,
in_units=self.backbone_model.units,
weight_initializer=weight_initializer)
self.mlm_decoder = nn.HybridSequential()
# Extra non-linear layer
self.mlm_decoder.add(nn.Dense(units=self.backbone_model.embed_size,
in_units=self.backbone_model.units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer))
self.mlm_decoder.add(get_activation(self.backbone_model.activation))
self.mlm_decoder.add(nn.LayerNorm(epsilon=self.backbone_model.layer_norm_eps,
in_channels=self.backbone_model.embed_size))
# Tie the output projection weight to the word embedding; only the bias is
# newly initialized and stored as a separate parameter that the original
# embedding layer does not use.
self.mlm_decoder.add(nn.Dense(units=self.backbone_model.vocab_size,
in_units=self.backbone_model.embed_size,
flatten=False,
bias_initializer=bias_initializer))
self.mlm_decoder[-1].weight = self.backbone_model.word_embed.weight
|
Parameters
----------
backbone_cfg
The cfg of the backbone model
weight_initializer
bias_initializer
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
Type of the tokens. If the inputs contain two sequences, we will set different
token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
sop_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
sop_score = self.sop_classifier(pooled_out)
if self.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, sop_score, mlm_scores
|
Generate the representation given the inputs.
This is used in training or fine-tuning an ALBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
Type of the tokens. If the inputs contain two sequences, we will set different
token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
sop_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
def get_pretrained_albert(model_name: str = 'google_albert_base_v2',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False)\
-> Tuple[CN, SentencepieceTokenizer, str, str]:
"""Get the pretrained Albert weights
Parameters
----------
model_name
The name of the Albert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_albert())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
spm_model_path = PRETRAINED_URL[model_name]['spm_model']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('spm_model', spm_model_path), ('vocab', vocab_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for key, path in download_jobs:
local_paths[key] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = SentencepieceTokenizer(local_paths['spm_model'],
vocab=local_paths['vocab'],
lowercase=do_lower)
if cfg is None:
cfg = AlbertModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path
|
Get the pretrained Albert weights
Parameters
----------
model_name
The name of the Albert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
|
get_pretrained_albert
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/albert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/albert.py
|
Apache-2.0
|
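A hedged usage sketch for loading the pretrained ALBERT artifacts directly; the import path mirrors src/gluonnlp/models/albert.py but is an assumption.

from gluonnlp.models.albert import AlbertModel, get_pretrained_albert   # assumed import path

cfg, tokenizer, backbone_params_path, mlm_params_path = get_pretrained_albert(
    'google_albert_base_v2', load_backbone=True, load_mlm=False)
model = AlbertModel.from_cfg(cfg)
model.load_parameters(backbone_params_path)
print(tokenizer.encode('GluonNLP is great!'))                            # subword pieces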
def __init__(self,
use_pooler: bool = False,
classifier_activation: bool = False,
extract_feature: bool = False,
pooler_activation='tanh',
**kwargs):
"""
Parameters
----------
use_pooler
Whether to use pooler
classifier_activation
extract_feature
Whether to extract the feature
pooler_activation
**kwargs
"""
super().__init__(**kwargs)
assert self._src_vocab_size == self._tgt_vocab_size, \
'Vocab size mismatch between encoder and decoder'
self._vocab_size = self._src_vocab_size
self.extract_feature = extract_feature
self.use_pooler = use_pooler
self.classifier_activation = classifier_activation
if not extract_feature:
if self.tie_weights:
self.tgt_final_layer = \
nn.Dense(units=self._tgt_vocab_size,
in_units=self.dec_units,
flatten=False,
use_bias=False,
dtype=self._dtype)
self.tgt_final_layer.weight = self.tgt_embed_layer.weight
else:
self.tgt_final_layer = \
nn.Dense(units=self._tgt_vocab_size,
in_units=self.dec_units,
flatten=False,
weight_initializer=self.weight_initializer,
use_bias=False,
dtype=self._dtype)
elif use_pooler and classifier_activation:
# Construct pooler
self.pooler = nn.Dense(units=self.units,
in_units=self.units,
flatten=False,
activation=pooler_activation,
weight_initializer=self.weight_initializer,
bias_initializer=self.bias_initializer,
dtype=self._dtype)
|
Parameters
----------
use_pooler
Whether to use pooler
classifier_activation
extract_feature
Whether to extract the feature
pooler_activation
**kwargs
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bart.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bart.py
|
Apache-2.0
|
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length):
"""
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
A tuple containing
- If 'self.extract_feature' = True
- contextual_embedding
- layout = 'NT'
Shape (batch_size, tgt_length, units)
- layout = 'TN'
Shape (tgt_length, batch_size, units)
- pooled_output, optional, only enabled if use_pooler = True
Shape (batch_size, units)
- If 'self.extract_feature' = False
- dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
"""
enc_out = self.encode(src_data, src_valid_length)
contextual_embedding = self.decode_seq(tgt_data, tgt_valid_length, enc_out,
src_valid_length)
if self.extract_feature:
if self.use_pooler:
pooled_output = self.apply_pooling(contextual_embedding, tgt_valid_length)
return contextual_embedding, pooled_output
else:
return contextual_embedding
else:
dec_out = self.tgt_final_layer(contextual_embedding)
return dec_out
|
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
A tuple containing
- If 'self.extract_feature' = True
- contextual_embedding
- layout = 'NT'
Shape (batch_size, tgt_length, units)
- layout = 'TN'
Shape (tgt_length, batch_size, units)
- pooled_output, optional, only enabled if use_pooler = True
Shape (batch_size, units)
- If 'self.extract_feature' = False
- dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bart.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bart.py
|
Apache-2.0
|
def apply_pooling(self, sequence, valid_length):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a BART model.
In BART, the pooled output is the embedding of the last token.
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
valid_length
Valid length of each sequence
Shape (batch_size,)
Returns
-------
outputs
Shape (batch_size, units)
"""
if self._layout == 'NT':
batch_indices = mx.npx.arange_like(sequence, axis=0).astype(mx.np.int32)
outputs = sequence[batch_indices, valid_length - 1]
elif self._layout == 'TN':
batch_indices = mx.npx.arange_like(sequence, axis=1).astype(mx.np.int32)
outputs = sequence[valid_length - 1, batch_indices]
else:
raise NotImplementedError
if self.classifier_activation:
return self.pooler(outputs)
else:
return outputs
|
Generate the representation given the inputs.
This is used for pre-training or fine-tuning a BART model.
In BART, the pooled output is the embedding of the last token.
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
valid_length
Valid length of each sequence
Shape (batch_size,)
Returns
-------
outputs
Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bart.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bart.py
|
Apache-2.0
|
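A NumPy-only sketch of the BART-style pooling above: take the embedding of the last valid token of each sequence (layout = 'NT').

import numpy as np

batch_size, tgt_length, units = 2, 5, 3
sequence = np.random.randn(batch_size, tgt_length, units)   # layout = 'NT'
valid_length = np.array([5, 3])

batch_indices = np.arange(batch_size)
pooled = sequence[batch_indices, valid_length - 1]          # (batch_size, units)
print(pooled.shape)                                          # (2, 3)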
def from_cfg(cls, cfg,
dtype=None,
extract_feature=False,
use_pooler=True,
classifier_activation=False):
"""
Parameters
----------
cfg
The configuration
dtype
Data type of the loaded config
extract_feature
Whether to only extract feature.
If so, the output of the layer will be contextual embeddings or the
contextual embedding + pooled output
use_pooler
Whether to use pooler
classifier_activation
Whether to use the classifier activation
Returns
-------
model
The initialized BartModel
"""
cfg = cls.get_cfg().clone_merge(cfg)
embed_initializer = mx.init.create(*cfg.INITIALIZER.embed)
weight_initializer = mx.init.create(*cfg.INITIALIZER.weight)
bias_initializer = mx.init.create(*cfg.INITIALIZER.bias)
if dtype is None:
dtype = cfg.MODEL.dtype
return cls(src_vocab_size=cfg.MODEL.vocab_size,
tgt_vocab_size=cfg.MODEL.vocab_size,
max_src_length=cfg.MODEL.max_src_length,
max_tgt_length=cfg.MODEL.max_tgt_length,
scale_embed=cfg.MODEL.scale_embed,
pos_embed_type=cfg.MODEL.pos_embed_type,
shared_embed=cfg.MODEL.shared_embed,
tie_weights=cfg.MODEL.tie_weights,
data_norm=cfg.MODEL.data_norm,
extract_feature=extract_feature,
use_pooler=use_pooler,
classifier_activation=classifier_activation,
attention_dropout=cfg.MODEL.attention_dropout,
activation_dropout=cfg.MODEL.activation_dropout,
dropout=cfg.MODEL.dropout,
pooler_activation=cfg.MODEL.pooler_activation,
layer_norm_eps=cfg.MODEL.layer_norm_eps,
enc_num_layers=cfg.MODEL.ENCODER.num_layers,
enc_units=cfg.MODEL.ENCODER.units,
enc_num_heads=cfg.MODEL.ENCODER.num_heads,
enc_hidden_size=cfg.MODEL.ENCODER.hidden_size,
enc_recurrent=cfg.MODEL.ENCODER.recurrent,
enc_activation=cfg.MODEL.ENCODER.activation,
enc_pre_norm=cfg.MODEL.ENCODER.pre_norm,
dec_num_layers=cfg.MODEL.DECODER.num_layers,
dec_units=cfg.MODEL.DECODER.units,
dec_num_heads=cfg.MODEL.DECODER.num_heads,
dec_hidden_size=cfg.MODEL.DECODER.hidden_size,
dec_recurrent=cfg.MODEL.DECODER.recurrent,
dec_activation=cfg.MODEL.DECODER.activation,
dec_pre_norm=cfg.MODEL.DECODER.pre_norm,
layout=cfg.MODEL.layout,
embed_initializer=embed_initializer,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
|
Parameters
----------
cfg
The configuration
dtype
Data type of the loaded config
extract_feature
Whether to only extract feature.
If so, the output of the layer will be contextual embeddings or the
contextual embedding + pooled output
use_pooler
Whether to use pooler
classifier_activation
Whether to use the classifier activation
Returns
-------
model
The initialized BartModel
|
from_cfg
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bart.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bart.py
|
Apache-2.0
|
def get_pretrained_bart(model_name: str = 'fairseq_bart_base',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True) \
-> Tuple[CN, HuggingFaceByteBPETokenizer, str, List]:
"""Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the BART model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
additional_output
The additional outputs
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_bart())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
merges_path = PRETRAINED_URL[model_name]['merges']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
local_paths = dict()
download_jobs = [('vocab', vocab_path), ('merges', merges_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceByteBPETokenizer(
merges_file=local_paths['merges'],
vocab_file=local_paths['vocab'],
lowercase=do_lower)
additional_out = []
if cfg is None:
cfg = BartModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, additional_out
|
Get the pretrained BART weights
Parameters
----------
model_name
The name of the BART model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
additional_output
The additional outputs
|
get_pretrained_bart
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bart.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bart.py
|
Apache-2.0
|
def get_backbone(model_name: str,
root: str = get_model_zoo_home_dir(),
**kwargs) -> Tuple['Block', str, BaseTokenizer, str, List]:
"""Get the backbone network
Parameters
----------
model_name
The name of the pretrained model
root
Downloaded directory of the model zoo
Returns
-------
model_cls
The class to construct the backbone network
cfg
Path to the config file of the backbone
tokenizer
The tokenizer that is bound to the backbone model
backbone_param_path
The path to the pretrained backbone weights
others
The other items returned by the create function.
Will be wrapped into a list
Examples
--------
>>> from gluonnlp.models import get_backbone
>>> model_cls, cfg, tokenizer, backbone_param_path, _ = get_backbone('google_en_cased_bert_base')
>>> model = model_cls.from_cfg(cfg)
>>> model.load_parameters(backbone_param_path)
"""
model_cls, local_create_fn = None, None
for backbone_type in BACKBONE_REGISTRY.list_keys():
ele_model_cls, ele_local_create_fn, list_key_fn = BACKBONE_REGISTRY.get(backbone_type)
if model_name in list_key_fn():
model_cls = ele_model_cls
local_create_fn = ele_local_create_fn
if model_cls is None or local_create_fn is None:
raise KeyError('The backbone model "{}" is not found! '
'Here are all available backbone models = {}'
.format(model_name,
list_backbone_names()))
cfg, tokenizer, local_params_path, *others = local_create_fn(model_name=model_name, root=root,
**kwargs)
return model_cls, cfg, tokenizer, local_params_path, others
|
Get the backbone network
Parameters
----------
model_name
The name of the pretrained model
root
Downloaded directory of the model zoo
Returns
-------
model_cls
The class to construct the backbone network
cfg
Path to the config file of the backbone
tokenizer
The tokenizer that is bound to the backbone model
backbone_param_path
The path to the pretrained backbone weights
others
The other items returned by the create function.
Will be wrapped into a list
Examples
--------
>>> from gluonnlp.models import get_backbone
>>> model_cls, cfg, tokenizer, backbone_param_path, _ = get_backbone('google_en_cased_bert_base')
>>> model = model_cls.from_cfg(cfg)
>>> model.load_parameters(backbone_param_path)
|
get_backbone
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/base.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/base.py
|
Apache-2.0
|
def forward(self, data, valid_length):
"""
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
# Generate the full self-attention mask from the valid lengths
attn_mask = gen_self_attn_mask(data, valid_length, dtype=self._dtype,
attn_type='full', layout=self.layout)
out = data
all_encodings_outputs = []
additional_outputs = []
for layer_idx in range(self._num_layers):
layer = self.all_layers[layer_idx]
out, attention_weights = layer(out, attn_mask)
# out : [batch_size, seq_len, units] or [seq_len, batch_size, units]
# attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = npx.sequence_mask(out,
sequence_length=valid_length,
use_sequence_length=True, axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = npx.sequence_mask(out, sequence_length=valid_length,
use_sequence_length=True, axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length):
# pylint: disable=arguments-differ
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output
This is optional. Shape (batch_size, units)
"""
initial_embedding = self.get_initial_embedding(inputs, token_types)
prev_out = initial_embedding
outputs = []
if self._compute_layout != self._layout:
# Swap the axes if the compute_layout and layout mismatch
contextual_embeddings, additional_outputs = self.encoder(np.swapaxes(prev_out, 0, 1),
valid_length)
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(prev_out, valid_length)
outputs.append(contextual_embeddings)
if self.use_pooler:
pooled_out = self.apply_pooling(contextual_embeddings)
outputs.append(pooled_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0]
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output
This is optional. Shape (batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of tokens. If None, it will be initialized as all zero.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
embedding = self.word_embed(inputs)
if token_types is None:
token_types = np.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding = self.token_pos_embed(npx.arange_like(inputs, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings that consider the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of tokens. If None, it will be initialized as all zero.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a bert model.
It takes the first token of the whole sequence, which is [CLS].
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
outputs
Shape (batch_size, units)
"""
if self.layout == 'NT':
outputs = sequence[:, 0, :]
else:
outputs = sequence[0, :, :]
return self.pooler(outputs)
|
Generate the representation given the inputs.
This is used for pre-training or fine-tuning a bert model.
It takes the first token of the whole sequence, which is [CLS].
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
outputs
Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def from_cfg(cls, cfg, use_pooler=True, dtype=None) -> 'BertModel':
"""
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
dtype
data type of the model
Returns
-------
ret
The constructed BertModel
"""
cfg = BertModel.get_cfg().clone_merge(cfg)
assert cfg.VERSION == 1, 'Wrong version!'
embed_initializer = mx.init.create(*cfg.INITIALIZER.embed)
weight_initializer = mx.init.create(*cfg.INITIALIZER.weight)
bias_initializer = mx.init.create(*cfg.INITIALIZER.bias)
if dtype is None:
dtype = cfg.MODEL.dtype
return cls(vocab_size=cfg.MODEL.vocab_size,
units=cfg.MODEL.units,
hidden_size=cfg.MODEL.hidden_size,
num_layers=cfg.MODEL.num_layers,
num_heads=cfg.MODEL.num_heads,
max_length=cfg.MODEL.max_length,
hidden_dropout_prob=cfg.MODEL.hidden_dropout_prob,
attention_dropout_prob=cfg.MODEL.attention_dropout_prob,
num_token_types=cfg.MODEL.num_token_types,
pos_embed_type=cfg.MODEL.pos_embed_type,
activation=cfg.MODEL.activation,
layer_norm_eps=cfg.MODEL.layer_norm_eps,
dtype=dtype,
embed_initializer=embed_initializer,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_pooler=use_pooler,
layout=cfg.MODEL.layout,
compute_layout=cfg.MODEL.compute_layout)
|
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
dtype
data type of the model
Returns
-------
ret
The constructed BertModel
|
from_cfg
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, mlm_scores
|
Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
nsp_score = self.nsp_classifier(pooled_out)
if self.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, nsp_score, mlm_scores
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
def get_pretrained_bert(model_name: str = 'google_en_cased_bert_base',
root: str = get_model_zoo_home_dir(),
                        load_backbone: bool = True,
                        load_mlm: bool = False)\
-> Tuple[CN, HuggingFaceWordPieceTokenizer, str, str]:
"""Get the pretrained bert weights
Parameters
----------
model_name
The name of the bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_bert())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for key, path in download_jobs:
local_paths[key] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceWordPieceTokenizer(
vocab_file=local_paths['vocab'],
unk_token='[UNK]',
pad_token='[PAD]',
cls_token='[CLS]',
sep_token='[SEP]',
mask_token='[MASK]',
lowercase=do_lower)
if cfg is None:
cfg = BertModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path
|
Get the pretrained bert weights
Parameters
----------
model_name
The name of the bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
|
get_pretrained_bert
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/bert.py
|
Apache-2.0
|
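A minimal usage sketch for the loader above. It assumes network access to the model zoo, that the downloaded parameter file matches the backbone built from the returned config, and that the tokenizer exposes encode(text, int) as elsewhere in gluonnlp.

from gluonnlp.models.bert import BertModel, get_pretrained_bert

cfg, tokenizer, backbone_params_path, _ = get_pretrained_bert(
    'google_en_cased_bert_base', load_backbone=True, load_mlm=False)
bert = BertModel.from_cfg(cfg, use_pooler=True)
bert.load_parameters(backbone_params_path)               # standard Gluon parameter loading
token_ids = tokenizer.encode('GluonNLP is great!', int)  # assumption: encode(text, int) returns ids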
def get_generator_cfg(model_config):
"""
Get the generator configuration from the Electra model config.
    The generator is usually smaller than the discriminator, but they share the same size in
    Electra small, where the released source code and the original paper disagree.
"""
generator_cfg = model_config.clone()
generator_layers_scale = model_config.MODEL.generator_layers_scale
generator_units_scale = model_config.MODEL.generator_units_scale
generator_cfg.defrost()
    # the round function is used to solve int(0.3333*768)!=256 for electra base
generator_cfg.MODEL.units = round(generator_units_scale * model_config.MODEL.units)
generator_cfg.MODEL.hidden_size = round(generator_units_scale * model_config.MODEL.hidden_size)
generator_cfg.MODEL.num_heads = round(generator_units_scale * model_config.MODEL.num_heads)
generator_cfg.MODEL.num_layers = round(generator_layers_scale * model_config.MODEL.num_layers)
generator_cfg.freeze()
return generator_cfg
|
Get the generator configuration from the Electra model config.
The generator is usually smaller than the discriminator, but they share the same size in
Electra small, where the released source code and the original paper disagree.
|
get_generator_cfg
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
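A small worked example of the rounding comment above for the Electra-base sizes; the 0.3333 scale is the value mentioned in that comment and is used here purely for illustration.

units, generator_units_scale = 768, 0.3333
print(int(generator_units_scale * units))    # 255 -- plain truncation is off by one
print(round(generator_units_scale * units))  # 256 -- matches the released generator size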
def __init__(self, units=512,
hidden_size=2048,
num_layers=6,
num_heads=8,
attention_dropout_prob=0.,
hidden_dropout_prob=0.,
output_attention=False,
dtype='float32',
output_all_encodings=False,
layer_norm_eps=1E-12,
weight_initializer=TruncNorm(stdev=0.02),
bias_initializer='zeros',
activation='gelu',
layout='NT'):
"""
Parameters
----------
units
The number of units
hidden_size
The hidden size
num_layers
Number of layers
num_heads
Number of heads
attention_dropout_prob
Dropout probability of the attention layer
hidden_dropout_prob
Dropout probability
output_attention
Whether to output the attention weights
dtype
Data type of the weights
output_all_encodings
layer_norm_eps
weight_initializer
bias_initializer
activation
layout
"""
super().__init__()
assert units % num_heads == 0, \
            'In ElectraEncoder, the units should be divisible ' \
'by the number of heads. Received units={}, num_heads={}' \
.format(units, num_heads)
self._dtype = dtype
self._layout = layout
self._num_layers = num_layers
self._output_attention = output_attention
self._output_all_encodings = output_all_encodings
self.all_encoder_layers = nn.HybridSequential()
for layer_idx in range(num_layers):
self.all_encoder_layers.add(
TransformerEncoderLayer(units=units,
hidden_size=hidden_size,
num_heads=num_heads,
attention_dropout_prob=attention_dropout_prob,
hidden_dropout_prob=hidden_dropout_prob,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=activation,
dtype=dtype,
layout=layout))
|
Parameters
----------
units
The number of units
hidden_size
The hidden size
num_layers
Number of layers
num_heads
Number of heads
attention_dropout_prob
Dropout probability of the attention layer
hidden_dropout_prob
Dropout probability
output_attention
Whether to output the attention weights
dtype
Data type of the weights
output_all_encodings
layer_norm_eps
weight_initializer
bias_initializer
activation
layout
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def forward(self, data, valid_length):
"""Generate the representation given the inputs.
        This is used in training or fine-tuning an Electra model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
layout=self._layout,
attn_type='full')
out = data
all_encodings_outputs = []
additional_outputs = []
for layer_idx in range(self._num_layers):
layer = self.all_encoder_layers[layer_idx]
out, attention_weights = layer(out, attn_mask)
# out : [batch_size, seq_len, units]
# attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = npx.sequence_mask(out,
sequence_length=valid_length,
use_sequence_length=True,
axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = npx.sequence_mask(out, sequence_length=valid_length,
use_sequence_length=True, axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs
|
Generate the representation given the inputs.
This is used in training or fine-tuning an Electra model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length=None):
"""Generate the representation given the inputs.
        This is used in training or fine-tuning an Electra model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output
This is optional. Shape (batch_size, units)
"""
initial_embedding = self.get_initial_embedding(inputs, token_types)
# Projecting the embedding into units
prev_out = initial_embedding
if self.embed_size != self.units:
prev_out = self.embed_factorized_proj(prev_out)
outputs = []
if self._compute_layout != self._layout:
# Swap the axes if the compute_layout and layout mismatch
contextual_embeddings, additional_outputs = self.encoder(np.swapaxes(prev_out, 0, 1),
valid_length)
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(prev_out, valid_length)
outputs.append(contextual_embeddings)
if self.use_pooler:
            # Here we just take the first token ([CLS]) without any extra pooling strategy,
            # which is slightly different from the BERT model's pooled_out.
            # The attribute name is kept the same as in the BERT and ALBERT models,
            # whose default is use_pooler=True.
if self._layout == 'NT':
pooled_out = contextual_embeddings[:, 0, :]
else:
pooled_out = contextual_embeddings[0, :, :]
outputs.append(pooled_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0]
|
Generate the representation given the inputs.
This is used in training or fine-tuning an Electra model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output
This is optional. Shape (batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of tokens. If None, it will be initialized as all zero.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_embed)
- layout = 'TN'
Shape (seq_length, batch_size, C_embed)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
embedding = self.word_embed(inputs)
if token_types is None:
token_types = np.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding = self.token_pos_embed(npx.arange_like(inputs, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings that consider the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of tokens. If None, it will be initialized as all zero.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_embed)
- layout = 'TN'
Shape (seq_length, batch_size, C_embed)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def apply_layerwise_decay(self, layerwise_decay: int,
not_included: Optional[List[str]] = None,
num_additional_layers: int = 2):
"""Apply the layer-wise gradient decay
.. math::
            lr = lr * layerwise\_decay^{max\_depth - layer\_depth}
Parameters
----------
layerwise_decay
Power rate of the layer-wise decay
not_included
            A list of parameter names that are not included in the layer-wise decay
num_additional_layers
The number of layers after the current backbone. This helps determine the max depth
"""
# Consider the task specific finetuning layer as the last layer, following with pooler
# In addition, the embedding parameters have the smaller learning rate based on this
# setting.
max_depth = self.num_layers + num_additional_layers
for _, value in self.collect_params('.*embed*').items():
value.lr_mult = layerwise_decay ** max_depth
for (layer_depth, layer) in enumerate(self.encoder.all_encoder_layers):
layer_params = layer.collect_params()
for key, value in layer_params.items():
                # Skip parameters whose names match any entry in `not_included`
                if not_included and any(pn in key for pn in not_included):
                    continue
                value.lr_mult = layerwise_decay**(max_depth - (layer_depth + 1))
|
Apply the layer-wise gradient decay
.. math::
    lr = lr * layerwise\_decay^{max\_depth - layer\_depth}
Parameters
----------
layerwise_decay
Power rate of the layer-wise decay
not_included
    A list of parameter names that are not included in the layer-wise decay
num_additional_layers
The number of layers after the current backbone. This helps determine the max depth
|
apply_layerwise_decay
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
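A worked example of the learning-rate multipliers produced by the method above, using hypothetical numbers: 12 encoder layers, a decay of 0.8 and the default two additional layers.

layerwise_decay, num_layers, num_additional_layers = 0.8, 12, 2
max_depth = num_layers + num_additional_layers                       # 14
embed_mult = layerwise_decay ** max_depth                            # ~0.044, smallest multiplier (embeddings)
layer_mults = [layerwise_decay ** (max_depth - (d + 1)) for d in range(num_layers)]
# layer_mults[0] (bottom layer) ~ 0.055, layer_mults[-1] (top layer) = 0.8 ** 2 = 0.64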
def frozen_params(self, untunable_depth: int, not_included: Optional[List[str]] = None):
"""Froze part of parameters according to layer depth.
That is, make all layer that shallower than `untunable_depth` untunable
to stop the gradient backward computation and accelerate the training.
Parameters
----------
untunable_depth
            The depth of the neural network, counted from 1 up to the number of layers
not_included
            A list of parameter names that are not included in the untunable parameters
"""
all_layers = self.encoder.all_encoder_layers
for _, value in self.collect_params('.*embed*').items():
value.grad_req = 'null'
for layer in all_layers[:untunable_depth]:
for key, value in layer.collect_params().items():
                # Skip parameters whose names match any entry in `not_included`
                if not_included and any(pn in key for pn in not_included):
                    continue
                value.grad_req = 'null'
|
Freeze part of the parameters according to layer depth.
That is, make all layers shallower than `untunable_depth` untunable,
to stop the backward gradient computation and accelerate training.
Parameters
----------
untunable_depth
    The depth of the neural network, counted from 1 up to the number of layers
not_included
    A list of parameter names that are not included in the untunable parameters
|
frozen_params
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
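A minimal usage sketch for the freezing helper above; `electra` stands for an already constructed backbone instance (a hypothetical variable).

# Freeze the embeddings and the three lowest encoder layers; keep everything else trainable.
electra.frozen_params(untunable_depth=3)
# Optionally exclude parameters whose names contain a given substring from the freezing.
electra.frozen_params(untunable_depth=3, not_included=['layer_norm'])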
def forward(self, inputs, token_types, valid_length):
"""Getting the scores of the replaced token detection of the whole sentence
based on the corrupted tokens produced from a generator.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
rtd_scores
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
rtd_scores = self.rtd_encoder(contextual_embeddings).squeeze(-1)
return contextual_embeddings, pooled_out, rtd_scores
|
Getting the scores of the replaced token detection of the whole sentence
based on the corrupted tokens produced from a generator.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
rtd_scores
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def __init__(self, backbone_cfg,
weight_initializer=None,
bias_initializer=None):
"""
Parameters
----------
backbone_cfg
Configuration of the backbone model
weight_initializer
bias_initializer
"""
super().__init__()
self.backbone_model = ElectraModel.from_cfg(backbone_cfg)
if weight_initializer is None:
weight_initializer = self.backbone_model.weight_initializer
if bias_initializer is None:
bias_initializer = self.backbone_model.bias_initializer
self.mlm_decoder = nn.HybridSequential()
# Extra non-linear layer
self.mlm_decoder.add(nn.Dense(units=self.backbone_model.embed_size,
in_units=self.backbone_model.units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer))
self.mlm_decoder.add(get_activation(self.backbone_model.activation))
self.mlm_decoder.add(nn.LayerNorm(epsilon=self.backbone_model.layer_norm_eps,
in_channels=self.backbone_model.embed_size))
        # Only the dense weight is loaded (tied to the word embedding below); the bias is
        # re-initialized and stored as 'word_embed_bias', which is not used in the
        # original embedding layer.
self.mlm_decoder.add(
nn.Dense(
units=self.backbone_model.vocab_size,
in_units=self.backbone_model.embed_size,
flatten=False,
bias_initializer=bias_initializer))
self.mlm_decoder[-1].weight = self.backbone_model.word_embed.weight
|
Parameters
----------
backbone_cfg
Configuration of the backbone model
weight_initializer
bias_initializer
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def tie_embeddings(self, word_embed_params=None,
token_type_embed_params=None,
token_pos_embed_params=None,
embed_layer_norm_params=None):
"""Tie the embedding layers between the backbone and the MLM decoder
Parameters
----------
word_embed_params
token_type_embed_params
token_pos_embed_params
embed_layer_norm_params
"""
self.backbone_model.word_embed.share_parameters(word_embed_params)
self.mlm_decoder[-1].share_parameters(word_embed_params)
self.backbone_model.token_type_embed.share_parameters(token_type_embed_params)
self.backbone_model.token_pos_embed.share_parameters(token_pos_embed_params)
self.backbone_model.embed_layer_norm.share_parameters(embed_layer_norm_params)
|
Tie the embedding layers between the backbone and the MLM decoder
Parameters
----------
word_embed_params
token_type_embed_params
token_pos_embed_params
embed_layer_norm_params
|
tie_embeddings
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.backbone_model.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, mlm_scores
|
Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def __init__(self,
disc_cfg,
uniform_generator=False,
tied_generator=False,
tied_embeddings=True,
disallow_correct=False,
temperature=1.0,
gumbel_eps=1E-9,
dtype='float32',
weight_initializer=None,
bias_initializer=None):
"""
Parameters
----------
disc_cfg :
Config for discriminator model including scaled size for generator
uniform_generator :
            Whether to use a generator with uniform weights, whose mlm_scores are
            totally random. In this case, the discriminator learns from a random 15%
            of the input tokens distinct from the subset.
tied_generator :
Whether to tie backbone model weights of generator and discriminator.
The size of G and D are required to be same if set to True.
tied_embeddings :
Whether to tie the embeddings of generator and discriminator
disallow_correct :
            Whether to disallow the generator from producing the correct token,
            that is, the 15% masked tokens are always fake.
temperature :
Temperature of gumbel distribution for sampling from generator
weight_initializer
bias_initializer
"""
super().__init__()
self._uniform_generator = uniform_generator
self._tied_generator = tied_generator
self._tied_embeddings = tied_embeddings
self._disallow_correct = disallow_correct
self._temperature = temperature
self._gumbel_eps = gumbel_eps
self._dtype = dtype
self.disc_cfg = disc_cfg
self.vocab_size = disc_cfg.MODEL.vocab_size
self.gen_cfg = get_generator_cfg(disc_cfg)
self.discriminator = ElectraDiscriminator(disc_cfg,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
self.disc_backbone = self.discriminator.backbone_model
if not uniform_generator and not tied_generator:
self.generator = ElectraGenerator(self.gen_cfg,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
if tied_embeddings:
self.generator.tie_embeddings(self.disc_backbone.word_embed.collect_params(),
self.disc_backbone.token_type_embed.collect_params(),
self.disc_backbone.token_pos_embed.collect_params(),
self.disc_backbone.embed_layer_norm.collect_params())
elif tied_generator:
# Reuse the weight of the discriminator backbone model
self.generator = ElectraGenerator(self.gen_cfg,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
# TODO(sxjscience, zheyu) Verify
self.generator.backbone_model = self.disc_backbone
elif uniform_generator:
# get the mlm_scores randomly over vocab
self.generator = None
|
Parameters
----------
disc_cfg :
Config for discriminator model including scaled size for generator
uniform_generator :
    Whether to use a generator with uniform weights, whose mlm_scores are
    totally random. In this case, the discriminator learns from a random 15%
    of the input tokens distinct from the subset.
tied_generator :
Whether to tie backbone model weights of generator and discriminator.
The size of G and D are required to be same if set to True.
tied_embeddings :
Whether to tie the embeddings of generator and discriminator
disallow_correct :
    Whether to disallow the generator from producing the correct token,
    that is, the 15% masked tokens are always fake.
temperature :
Temperature of gumbel distribution for sampling from generator
weight_initializer
bias_initializer
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
original_tokens, masked_positions):
"""Getting the mlm scores of each masked positions from a generator,
then produces the corrupted tokens sampling from a gumbel distribution.
We also get the ground-truth and scores of the replaced token detection
which is output by a discriminator. The ground-truth is an array with same
shape as the input using 1 stand for original token and 0 for replacement.
Notice: There is a problem when the masked positions have duplicate indexs.
Try to avoid that in the data preprocessing process. In addition, loss calculation
should be done in the training scripts as well.
Parameters
----------
inputs
The masked input
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The token types. If the inputs contain two sequences, we will set different token types
for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence.
Shape (batch_size,)
original_tokens
The original tokens that appear in the unmasked input sequence.
Shape (batch_size, num_masked_positions).
masked_positions :
The masked position of the sequence.
Shape (batch_size, num_masked_positions).
Returns
-------
mlm_scores
The masked language model score.
Shape (batch_size, num_masked_positions, vocab_size)
rtd_scores
The replaced-token-detection score. Predicts whether the tokens are replaced or not.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
replaced_inputs
Shape (batch_size, num_masked_positions)
labels
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
"""
if self._uniform_generator:
            # Generate the corrupted tokens randomly, using an mlm_scores vector whose values are all 0
zero_logits = np.zeros((1, 1, self.vocab_size), dtype=self._dtype)
mlm_scores = np.expand_dims(np.zeros_like(masked_positions, dtype=self._dtype),
axis=-1)
mlm_scores = mlm_scores + zero_logits
else:
_, _, mlm_scores = self.generator(inputs, token_types, valid_length, masked_positions)
corrupted_tokens, fake_data, labels = self.get_corrupted_tokens(
inputs, original_tokens, masked_positions, mlm_scores)
# The discriminator takes the same input as the generator and the token_ids are
# replaced with fake data
_, _, rtd_scores = self.discriminator(fake_data, token_types, valid_length)
return mlm_scores, rtd_scores, corrupted_tokens, labels
|
Get the mlm scores of each masked position from the generator, then produce
the corrupted tokens by sampling from a Gumbel distribution. We also get the
ground-truth and the scores of the replaced token detection, which are output
by the discriminator. The ground-truth is an array with the same shape as the
input, where 1 marks a replaced token and 0 marks an original token.
Note that there is a problem when the masked positions contain duplicate indices;
try to avoid that during data preprocessing. In addition, the loss calculation
should be done in the training scripts.
Parameters
----------
inputs
The masked input
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The token types. If the inputs contain two sequences, we will set different token types
for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence.
Shape (batch_size,)
original_tokens
The original tokens that appear in the unmasked input sequence.
Shape (batch_size, num_masked_positions).
masked_positions :
The masked position of the sequence.
Shape (batch_size, num_masked_positions).
Returns
-------
mlm_scores
The masked language model score.
Shape (batch_size, num_masked_positions, vocab_size)
rtd_scores
The replaced-token-detection score. Predicts whether the tokens are replaced or not.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
replaced_inputs
Shape (batch_size, num_masked_positions)
labels
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
def get_corrupted_tokens(self, inputs, original_tokens, masked_positions, logits):
"""
Sample from the generator to create corrupted input.
Parameters
----------
inputs
The masked input
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
original_tokens
The original tokens that appear in the unmasked input sequence
Shape (batch_size, num_masked_positions).
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
logits
The logits of each tokens
Shape (batch_size, num_masked_positions, vocab_size)
Returns
-------
corrupted_tokens
Shape (batch_size, )
fake_data
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
labels
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
"""
if self._disallow_correct:
# TODO(sxjscience), Revise the implementation
disallow = npx.one_hot(masked_positions, depth=self.vocab_size, dtype=self._dtype)
logits = logits - 1000.0 * disallow
# gumbel_softmax() samples from the logits with a noise of Gumbel distribution
prob = gumbel_softmax(
logits,
temperature=self._temperature,
eps=self._gumbel_eps,
use_np_gumbel=False)
corrupted_tokens = np.argmax(prob, axis=-1).astype(np.int32)
if self.disc_backbone.layout == 'TN':
inputs = inputs.T
original_data = update_vectors_by_position(inputs, original_tokens, masked_positions)
fake_data = update_vectors_by_position(inputs, corrupted_tokens, masked_positions)
updates_mask = add_vectors_by_position(np.zeros_like(inputs),
np.ones_like(masked_positions), masked_positions)
        # Deal with multiple zeros in masked_positions (e.g. padding), which would
        # otherwise accumulate a value greater than one at the first index ([CLS])
updates_mask = np.minimum(updates_mask, 1)
labels = updates_mask * np.not_equal(fake_data, original_data)
if self.disc_backbone.layout == 'TN':
return corrupted_tokens, fake_data.T, labels.T
else:
return corrupted_tokens, fake_data, labels
|
Sample from the generator to create corrupted input.
Parameters
----------
inputs
The masked input
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
original_tokens
The original tokens that appear in the unmasked input sequence
Shape (batch_size, num_masked_positions).
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
logits
The logits of each tokens
Shape (batch_size, num_masked_positions, vocab_size)
Returns
-------
corrupted_tokens
Shape (batch_size, )
fake_data
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
labels
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
|
get_corrupted_tokens
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
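A plain-NumPy sketch of the label construction above, with made-up token ids and a Python loop standing in for the *_by_position helpers, whose semantics are assumed from the surrounding comments and shapes.

import numpy as np
inputs = np.array([[101, 103, 7, 103, 102]])     # masked input; 103 plays the role of [MASK]
original_tokens = np.array([[5, 9]])             # what the two masked slots originally held
corrupted_tokens = np.array([[5, 8]])            # what the generator sampled for those slots
masked_positions = np.array([[1, 3]])
original_data, fake_data = inputs.copy(), inputs.copy()
updates_mask = np.zeros_like(inputs)
for j in range(masked_positions.shape[1]):
    p = masked_positions[0, j]
    original_data[0, p] = original_tokens[0, j]
    fake_data[0, p] = corrupted_tokens[0, j]
    updates_mask[0, p] += 1
updates_mask = np.minimum(updates_mask, 1)       # clamp duplicate / padded positions
labels = updates_mask * (fake_data != original_data)
print(labels)                                    # [[0 0 0 1 0]] -- only position 3 was replaced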
def get_pretrained_electra(model_name: str = 'google_electra_small',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_disc: bool = False,
load_gen: bool = False) \
-> Tuple[CN, HuggingFaceWordPieceTokenizer,
Optional[str],
Tuple[Optional[str], Optional[str]]]:
"""Get the pretrained Electra weights
Parameters
----------
model_name
The name of the Electra model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_disc
Whether to load the weights of the discriminator
load_gen
Whether to load the weights of the generator
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
other_net_params_paths
Path to the parameter of the discriminator and the generator.
They will be returned inside a tuple.
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_electra())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
disc_params_path = PRETRAINED_URL[model_name]['disc_model']
gen_params_path = PRETRAINED_URL[model_name]['gen_model']
local_paths = dict()
download_jobs = [('vocab', vocab_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_disc:
local_disc_params_path = download(url=get_repo_model_zoo_url() + disc_params_path,
path=os.path.join(root, disc_params_path),
sha1_hash=FILE_STATS[disc_params_path])
else:
local_disc_params_path = None
if load_gen:
local_gen_params_path = download(url=get_repo_model_zoo_url() + gen_params_path,
path=os.path.join(root, gen_params_path),
sha1_hash=FILE_STATS[gen_params_path])
else:
local_gen_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceWordPieceTokenizer(
vocab_file=local_paths['vocab'],
unk_token='[UNK]',
pad_token='[PAD]',
cls_token='[CLS]',
sep_token='[SEP]',
mask_token='[MASK]',
lowercase=do_lower)
if cfg is None:
cfg = ElectraModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, (local_disc_params_path, local_gen_params_path)
|
Get the pretrained Electra weights
Parameters
----------
model_name
The name of the Electra model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_disc
Whether to load the weights of the discriminator
load_gen
Whether to load the weights of the generator
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
other_net_params_paths
Path to the parameter of the discriminator and the generator.
They will be returned inside a tuple.
|
get_pretrained_electra
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/electra.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/electra.py
|
Apache-2.0
|
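A minimal usage sketch mirroring the BERT loader; it assumes model-zoo access and that the downloaded parameter file matches the backbone built from the returned config.

from gluonnlp.models.electra import ElectraModel, get_pretrained_electra

cfg, tokenizer, backbone_params_path, (disc_path, gen_path) = get_pretrained_electra(
    'google_electra_small', load_backbone=True, load_disc=False, load_gen=False)
electra = ElectraModel.from_cfg(cfg)
electra.load_parameters(backbone_params_path)    # disc_path and gen_path are None in this call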
def forward(self, x, layer_states):
"""
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
layer_states
- layout = 'NT'
Shape (2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (2, prev_len, batch_size, C_in)
"""
x = self.ln(x)
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
prev_len = npx.shape_array(layer_states)[2]
else:
batch_axis, time_axis = 1, 0
prev_len = npx.shape_array(layer_states)[1]
query, key, value = np.split(self.qkv(x), 3, axis=-1)
if layer_states is not None:
prev_key, prev_value = layer_states[0], layer_states[1]
key = np.concatenate([prev_key, key], axis=time_axis)
value = np.concatenate([prev_value, value], axis=time_axis)
new_states = np.stack([key, value], axis=0)
# gen mask
query_pos = npx.arange_like(query, axis=time_axis)
if prev_len is not None:
query_pos = query_pos + prev_len
key_pos = npx.arange_like(key, axis=time_axis)
# (query_len, key_len)
mask = (npx.reshape(key_pos, (1, -1)) <=
npx.reshape(query_pos, (-1, 1))).astype(self._dtype)
# broadcast to (batch_size, query_len, key_len)
mask = npx.broadcast_like(
np.expand_dims(mask, axis=0),
query,
lhs_axes=0,
rhs_axes=batch_axis
)
query = npx.reshape(query, (-2, -2, self._num_heads, -1))
key = npx.reshape(key, (-2, -2, self._num_heads, -1))
value = npx.reshape(value, (-2, -2, self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, mask)
out = self.out_proj(out)
out = self.hidden_dropout(out)
return out, new_states
|
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
layer_states
- layout = 'NT'
Shape (2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (2, prev_len, batch_size, C_in)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
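A plain-NumPy sketch of the causal mask built above when prev_len cached positions already exist, using small made-up sizes.

import numpy as np
prev_len, seq_len = 2, 3
query_pos = np.arange(seq_len) + prev_len        # positions of the new queries: [2, 3, 4]
key_pos = np.arange(prev_len + seq_len)          # cached + new key positions: [0, 1, 2, 3, 4]
mask = (key_pos[None, :] <= query_pos[:, None]).astype('float32')
print(mask)
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 1. 0.]
#  [1. 1. 1. 1. 1.]]
# Row i: the i-th new token attends to all cached keys plus the new keys up to itself.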
def forward(self, x, layer_states):
"""
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
layer_states
- layout = 'NT'
Shape (2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (2, prev_len, batch_size, C_in)
Returns
-------
new_x
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
new_states
- layout = 'NT'
Shape (2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (2, prev_len + seq_length, batch_size, C_in)
"""
h, new_layer_states = self.atten(x, layer_states)
x = x + h
h = self.ffn(x)
return h, new_layer_states
|
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
layer_states
- layout = 'NT'
Shape (2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (2, prev_len, batch_size, C_in)
Returns
-------
new_x
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
new_states
- layout = 'NT'
Shape (2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (2, prev_len + seq_length, batch_size, C_in)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
def forward(self, x, states):
"""
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
states
The previous states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len, C_in)]
- layout = 'TN'
Shape (num_layers, 2, prev_len, batch_size, C_in)]
Returns
-------
new_x
Output
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
new_states
The new states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len + seq_length, batch_size, C_in)
"""
prev_len = npx.shape_array(states)[3] if self._layout == 'NT' else \
npx.shape_array(states)[2]
x = self.get_initial_embedding(x, prev_len)
if self._layout != self._compute_layout:
x = np.swapaxes(x, 0, 1)
states = np.swapaxes(states, 2, 3)
new_states = []
for layer_idx in range(self._num_layers):
layer_states = None if states is None else states[layer_idx]
x, new_layer_states = self._layers[layer_idx](x, layer_states)
new_states.append(new_layer_states)
new_states = np.stack(new_states, axis=0)
x = self._final_ln(x)
if self._layout != self._compute_layout:
x = np.swapaxes(x, 0, 1)
new_states = np.swapaxes(new_states, 2, 3)
return x, new_states
|
Parameters
----------
x
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
states
The previous states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len, C_in)]
- layout = 'TN'
Shape (num_layers, 2, prev_len, batch_size, C_in)]
Returns
-------
new_x
Output
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
new_states
The new states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len + seq_length, batch_size, C_in)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
def get_initial_embedding(self, inputs, prev_len):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
prev_len
The previous length. It will be a scalar.
Returns
-------
embedding
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
"""
embedding = self._embed(inputs)
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
else:
batch_axis, time_axis = 1, 0
if self._pos_embed_type is not None:
pos = npx.arange_like(inputs, axis=time_axis)
if prev_len is not None:
pos = pos + prev_len
positional_embedding = self._pos_embed(pos)
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
embedding = self._embed_dropout(embedding)
return embedding
|
Get the initial token embeddings, taking the positional embeddings into account
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
prev_len
The previous length. It will be a scalar.
Returns
-------
embedding
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
def init_states(self, batch_size, ctx, dtype=None):
"""Initialize the states required for incremental decoding
Returns
-------
init_states
- layout = 'NT'
Shape (num_layers, 2, batch_size, 0, C_in)
- layout = 'TN'
Shape (num_layers, 2, 0, batch_size, C_in)
"""
if dtype is None:
dtype = self._dtype
return mx.np.zeros(shape=(self._num_layers, 2, batch_size, 0,
self._units), ctx=ctx, dtype=dtype) if self.layout == 'NT' else \
mx.np.zeros(shape=(self._num_layers, 2, 0, batch_size,
self._units), ctx=ctx, dtype=dtype)
|
Initialize the states required for incremental decoding
Returns
-------
init_states
- layout = 'NT'
Shape (num_layers, 2, batch_size, 0, C_in)
- layout = 'TN'
Shape (num_layers, 2, 0, batch_size, C_in)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
def forward(self, inputs, states):
"""Getting the logits. This can be used for language modeling.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
states
The states.
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len, batch_size, C_in)
Returns
-------
logits
- layout = 'NT'
Shape (batch_size, seq_length, vocab_size).
- layout = 'TN'
Shape (seq_length, batch_size, vocab_size).
new_states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len + seq_length, batch_size, C_in)
"""
contextual_embeddings, new_states = self._backbone_model(inputs, states)
logits = self._lm_head(contextual_embeddings)
return logits, new_states
|
Getting the logits. This can be used for language modeling.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
states
The states.
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len, batch_size, C_in)
Returns
-------
logits
- layout = 'NT'
Shape (batch_size, seq_length, vocab_size).
- layout = 'TN'
Shape (seq_length, batch_size, vocab_size).
new_states
- layout = 'NT'
Shape (num_layers, 2, batch_size, prev_len + seq_length, C_in)
- layout = 'TN'
Shape (num_layers, 2, prev_len + seq_length, batch_size, C_in)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
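A minimal greedy incremental-decoding sketch tying init_states and the LM forward above together. `lm` is assumed to be a constructed language-model wrapper whose forward(inputs, states) returns (logits, new_states) as shown above, `backbone` is the GPT2Model it wraps, the layout is 'NT', and the start token id is made up.

import mxnet as mx
ctx = mx.cpu()
states = backbone.init_states(batch_size=1, ctx=ctx)         # empty KV cache, prev_len = 0
step_input = mx.np.zeros((1, 1), dtype='int32', ctx=ctx)     # hypothetical start token id 0
for _ in range(8):
    logits, states = lm(step_input, states)                  # the cache grows by one position per call
    step_input = mx.np.argmax(logits[:, -1:, :], axis=-1).astype('int32')
# After the loop the cached states cover 8 previously decoded positions.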
def get_pretrained_gpt2(model_name: str = 'gpt2_124M',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_lm: bool = False)\
-> Tuple[CN, HuggingFaceByteBPETokenizer, str, str]:
"""Get the pretrained GPT-2 weights
Parameters
----------
model_name
The name of the GPT-2 model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_lm
Whether to load the weights of LM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
lm_params_path
Path to the parameter that includes both the backbone and the LM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_gpt2())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
merges_path = PRETRAINED_URL[model_name]['merges']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
lm_params_path = PRETRAINED_URL[model_name]['lm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path), ('merges', merges_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_lm and lm_params_path is not None:
local_lm_params_path = download(url=get_repo_model_zoo_url() + lm_params_path,
path=os.path.join(root, lm_params_path),
sha1_hash=FILE_STATS[lm_params_path])
else:
local_lm_params_path = None
tokenizer = HuggingFaceByteBPETokenizer(
merges_file=local_paths['merges'],
vocab_file=local_paths['vocab'])
if cfg is None:
cfg = GPT2Model.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_lm_params_path
|
Get the pretrained GPT-2 weights
Parameters
----------
model_name
The name of the GPT-2 model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_lm
Whether to load the weights of LM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
lm_params_path
Path to the parameter that includes both the backbone and the LM
|
get_pretrained_gpt2
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/gpt2.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/gpt2.py
|
Apache-2.0
|
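A minimal usage sketch for the GPT-2 loader; it assumes model-zoo access, and GPT2Model.from_cfg is assumed to exist analogously to BertModel.from_cfg above.

from gluonnlp.models.gpt2 import GPT2Model, get_pretrained_gpt2

cfg, tokenizer, params_path, _ = get_pretrained_gpt2(
    'gpt2_124M', load_backbone=True, load_lm=False)
gpt2 = GPT2Model.from_cfg(cfg)        # assumption: from_cfg mirrors the other backbones
gpt2.load_parameters(params_path)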
def __init__(self,
use_bottleneck: bool = True,
units: int = 512,
real_units: int = 128,
hidden_size: int = 2048,
num_heads: int = 8,
num_stacked_ffn: int = 1,
bottleneck_strategy: str = 'qk_sharing',
attention_dropout_prob: float = 0.1,
hidden_dropout_prob: float = 0.1,
activation_dropout_prob: float = 0.0,
activation: str = 'gelu',
normalization: str = 'layer_norm',
layer_norm_eps: float = 1e-12,
use_qkv_bias: bool = True,
weight_initializer: Optional[InitializerType] = None,
bias_initializer: Optional[InitializerType] = 'zeros',
dtype='float32',
layout='NT'):
"""
Parameters
----------
use_bottleneck
Whether to use the bottleneck layer.
units
size of inter-bottleneck
real_units
size of intra-bottleneck
hidden_size
size of feed-forward network
num_heads
num_stacked_ffn
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
activation
normalization
layer_norm_eps
            only valid when normalization is 'layer_norm'
use_qkv_bias
weight_initializer
bias_initializer
dtype
Data type of the block
layout
Layout of the input + output
"""
super().__init__()
self._use_bottleneck = use_bottleneck
self._units = units
self._real_units = real_units
self._num_heads = num_heads
self._num_stacked_ffn = num_stacked_ffn
self._bottleneck_strategy = bottleneck_strategy
self._dtype = dtype
self._layout = layout
        assert real_units % num_heads == 0, 'real_units must be divisible by the number of heads'
self.dropout_layer = nn.Dropout(hidden_dropout_prob)
if use_bottleneck:
self.in_bottleneck_proj = nn.Dense(units=real_units,
in_units=units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.in_bottleneck_ln = get_norm_layer(normalization=normalization,
in_channels=real_units,
epsilon=layer_norm_eps)
self.out_bottleneck_proj = nn.Dense(units=units,
in_units=real_units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.out_bottleneck_ln = get_norm_layer(normalization=normalization,
in_channels=units,
epsilon=layer_norm_eps)
if bottleneck_strategy == 'qk_sharing':
self.shared_qk = nn.Dense(units=real_units,
in_units=units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.shared_qk_ln = get_norm_layer(normalization=normalization,
in_channels=real_units,
epsilon=layer_norm_eps)
self.attention_proj = nn.Dense(units=real_units,
flatten=False,
in_units=real_units,
use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
# The in_units of qkv varies according to the sharing strategy
if self._use_bottleneck:
if self._bottleneck_strategy == 'qk_sharing':
attn_query_in_units = real_units
attn_key_in_units = real_units
attn_value_in_units = units
elif self._bottleneck_strategy == 'from_bottleneck':
attn_query_in_units = real_units
attn_key_in_units = real_units
attn_value_in_units = real_units
elif self._bottleneck_strategy == 'from_input':
attn_query_in_units = units
attn_key_in_units = units
attn_value_in_units = units
else:
raise NotImplementedError
else:
attn_query_in_units = units
attn_key_in_units = units
attn_value_in_units = units
self.attn_query = nn.Dense(units=real_units,
in_units=attn_query_in_units,
flatten=False,
use_bias=use_qkv_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.attn_key = nn.Dense(units=real_units,
in_units=attn_key_in_units,
flatten=False,
use_bias=use_qkv_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.attn_value = nn.Dense(units=real_units,
in_units=attn_value_in_units,
flatten=False,
use_bias=use_qkv_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
attention_layout = 'NTK' if self._layout == 'NT' else 'TNK'
self.attention_cell = \
MultiHeadAttentionCell(
query_units=real_units,
num_heads=num_heads,
attention_dropout=attention_dropout_prob,
scaled=True,
dtype=self._dtype,
layout=attention_layout
)
self.layer_norm = get_norm_layer(normalization=normalization,
in_channels=real_units,
epsilon=layer_norm_eps)
self.stacked_ffn = nn.HybridSequential()
for ffn_idx in range(num_stacked_ffn):
is_last_ffn = (ffn_idx == (num_stacked_ffn - 1))
            # Only apply dropout on the last FFN layer, and only when the bottleneck is not used
dropout = float(hidden_dropout_prob * (not use_bottleneck) * is_last_ffn)
self.stacked_ffn.add(
PositionwiseFFN(units=real_units,
hidden_size=hidden_size,
dropout=dropout,
activation_dropout=activation_dropout_prob,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=activation,
normalization=normalization,
layer_norm_eps=layer_norm_eps,
dtype=self._dtype))
|
Parameters
----------
use_bottleneck
Whether to use the bottleneck layer.
units
size of inter-bottleneck
real_units
size of intra-bottleneck
hidden_size
size of feed-forward network
num_heads
num_stacked_ffn
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
activation
normalization
layer_norm_eps
    only valid when normalization is 'layer_norm'
use_qkv_bias
weight_initializer
bias_initializer
dtype
Data type of the block
layout
Layout of the input + output
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def forward(self, data, attn_mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
The attention mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
"""
if self._use_bottleneck:
bn_proj = self.in_bottleneck_proj(data)
bn_proj = self.in_bottleneck_ln(bn_proj)
input = bn_proj
if self._bottleneck_strategy == 'qk_sharing':
# for Mobile Bert
qk_shared = self.shared_qk(data)
qk_shared = self.shared_qk_ln(qk_shared)
query = qk_shared
key = qk_shared
value = data
elif self._bottleneck_strategy == 'from_bottleneck':
# for Mobile Bert Tiny
query = bn_proj
key = bn_proj
value = bn_proj
elif self._bottleneck_strategy == 'from_input':
query = data
key = data
value = data
else:
raise NotImplementedError
else:
input = data
query = data
key = data
value = data
query = npx.reshape(self.attn_query(query), (-2, -2, self._num_heads, -1))
key = npx.reshape(self.attn_key(key), (-2, -2, self._num_heads, -1))
value = npx.reshape(self.attn_value(value), (-2, -2, self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, attn_mask)
out = self.attention_proj(out)
if not self._use_bottleneck:
out = self.dropout_layer(out)
out = out + input
out = self.layer_norm(out)
for ffn_idx in range(self._num_stacked_ffn):
ffn = self.stacked_ffn[ffn_idx]
out = ffn(out)
if self._use_bottleneck:
out = self.out_bottleneck_proj(out)
out = self.dropout_layer(out)
out = out + data
out = self.out_bottleneck_ln(out)
return out, attn_weight
|
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
The attention mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
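The bottleneck flow in the layer above can be hard to follow from shapes alone. The following is a minimal plain-NumPy sketch (not the MXNet implementation) of the 'from_bottleneck' path: project the input down to real_units, run attention and the stacked FFNs at that width (represented here by an identity placeholder), project back to units, and add the residual from the original input. All sizes and the placeholder blocks are illustrative assumptions.

import numpy as np

B, T, units, real_units = 2, 4, 512, 128
rng = np.random.default_rng(0)
x = rng.standard_normal((B, T, units))                    # layout 'NT': (batch, time, units)
W_in = rng.standard_normal((units, real_units)) * 0.02    # stands in for in_bottleneck_proj
W_out = rng.standard_normal((real_units, units)) * 0.02   # stands in for out_bottleneck_proj

bn_proj = x @ W_in     # (B, T, real_units); attention and FFNs operate at this width
inner = bn_proj        # placeholder for self-attention + stacked FFN at real_units
out = inner @ W_out    # back to (B, T, units)
out = out + x          # residual connection to the original, unprojected input
print(out.shape)       # (2, 4, 512)
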
def forward(self, data, valid_length):
"""
Generate the representation given the inputs.
This is used in training or fine-tuning a mobile bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
elif self._layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Received layout="{}". '
'Only "NT" and "TN" are supported.'.format(self._layout))
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
layout=self._layout,
attn_type='full')
out = data
all_encodings_outputs = []
additional_outputs = []
all_encodings_outputs.append(out)
for layer_idx in range(self._num_layers):
layer = self.all_layers[layer_idx]
out, attention_weights = layer(out, attn_mask)
            # out : [batch_size, seq_len, units] for layout 'NT' ([seq_len, batch_size, units] for 'TN')
            # attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = npx.sequence_mask(out,
sequence_length=valid_length,
use_sequence_length=True,
axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = npx.sequence_mask(out, sequence_length=valid_length,
use_sequence_length=True,
axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs
|
Generate the representation given the inputs.
This is used in training or fine-tuning a mobile bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
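For reference, npx.sequence_mask in the encoder above zeroes out the time steps beyond each sample's valid_length. Below is a plain-NumPy illustration of the same idea for the 'NT' layout; it is only a conceptual stand-in for the MXNet operator.

import numpy as np

B, T, C = 2, 5, 3
out = np.ones((B, T, C))                                # pretend encoder output, layout 'NT'
valid_length = np.array([3, 5])
keep = np.arange(T)[None, :] < valid_length[:, None]    # (B, T) boolean mask
masked = out * keep[:, :, None]                         # zero the padded time steps
print(masked[0, :, 0])                                  # [1. 1. 1. 0. 0.]
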
def forward(self, inputs, token_types, valid_length):
# pylint: disable=arguments-differ
"""Generate the representation given the inputs.
This is used in training or fine-tuning a mobile bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding :
Shape (batch_size, seq_length, units).
pooled_output :
This is optional. Shape (batch_size, units)
"""
embedding = self.get_initial_embedding(inputs, token_types)
if self._compute_layout != self._layout:
contextual_embeddings, additional_outputs = self.encoder(np.swapaxes(embedding, 0, 1),
valid_length)
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(embedding, valid_length)
if self.use_pooler:
pooled_out = self.apply_pooling(contextual_embeddings)
return contextual_embeddings, pooled_out
else:
return contextual_embeddings
|
Generate the representation given the inputs.
This is used in training or fine-tuning a mobile bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding :
Shape (batch_size, seq_length, units).
pooled_output :
This is optional. Shape (batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
Type of tokens. If None, it will be initialized as all zero
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
"""
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
elif self._layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError
word_embedding = self.word_embed(inputs)
if self.trigram_embed:
if self._layout == 'NT':
word_embedding = np.concatenate(
[np.pad(word_embedding[:, 1:], ((0, 0), (0, 1), (0, 0))),
word_embedding,
np.pad(word_embedding[:, :-1], ((0, 0), (1, 0), (0, 0)))], axis=-1)
elif self._layout == 'TN':
word_embedding = np.concatenate(
[np.pad(word_embedding[1:, :], ((0, 1), (0, 0), (0, 0))),
word_embedding,
np.pad(word_embedding[:-1, :], ((1, 0), (0, 0), (0, 0)))], axis=-1)
else:
raise NotImplementedError
        # Only the word embedding needs the projection to `units` (the token type and positional embeddings are created with `units` channels)
if self.trigram_embed or self.embed_size != self.units:
word_embedding = self.embed_factorized_proj(word_embedding)
if token_types is None:
token_types = np.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = word_embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding =\
self.token_pos_embed(npx.arange_like(embedding, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings, taking the token type and positional embeddings into account.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
Type of tokens. If None, it will be initialized as all zero
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a mobile bert model.
Get the first token of the whole sequence which is [CLS]
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
outputs
Shape (batch_size, units)
"""
if self._layout == 'NT':
outputs = sequence[:, 0, :]
else:
outputs = sequence[0, :, :]
if self.classifier_activation:
return self.pooler(outputs)
else:
return outputs
|
Generate the representation given the inputs.
This is used for pre-training or fine-tuning a mobile bert model.
Get the first token of the whole sequence which is [CLS]
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
outputs
Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.backbone_model.layout == 'TN':
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
else:
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
intermediate_output = self.mlm_decoder(mlm_features)
if self.backbone_model.embed_size != self.backbone_model.units:
scores = self.embedding_table(
intermediate_output[:, :, :self.backbone_model.embed_size])
extra_scores = self.extra_table(
intermediate_output[:, :, self.backbone_model.embed_size:])
mlm_scores = scores + extra_scores
else:
mlm_scores = self.embedding_table(intermediate_output)
return contextual_embeddings, pooled_out, mlm_scores
|
Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Generate the representation given the inputs.
        This is used in training or fine-tuning a mobile bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score
Shape (batch_size, 2)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
nsp_score = self.nsp_classifier(pooled_out)
if self.backbone_model.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
intermediate_output = self.mlm_decoder(mlm_features)
if self.backbone_model.embed_size != self.backbone_model.units:
scores = self.embedding_table(
intermediate_output[:, :, :self.backbone_model.embed_size])
extra_scores = self.extra_table(
intermediate_output[:, :, self.backbone_model.embed_size:])
mlm_scores = scores + extra_scores
else:
mlm_scores = self.embedding_table(intermediate_output)
return contextual_embeddings, pooled_out, nsp_score, mlm_scores
|
Generate the representation given the inputs.
This is used in training or fine-tuning a mobile bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score
Shape (batch_size, 2)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def get_pretrained_mobilebert(model_name: str = 'google_uncased_mobilebert',
root: str = get_model_zoo_home_dir(),
                              load_backbone: bool = True,
                              load_mlm: bool = False)\
-> Tuple[CN, HuggingFaceWordPieceTokenizer, str, str]:
"""Get the pretrained mobile bert weights
Parameters
----------
model_name
The name of the mobile bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_mobilebert())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceWordPieceTokenizer(
vocab_file=local_paths['vocab'],
unk_token='[UNK]',
pad_token='[PAD]',
cls_token='[CLS]',
sep_token='[SEP]',
mask_token='[MASK]',
lowercase=do_lower)
if cfg is None:
cfg = MobileBertModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path
|
Get the pretrained mobile bert weights
Parameters
----------
model_name
The name of the mobile bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
|
get_pretrained_mobilebert
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/mobilebert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py
|
Apache-2.0
|
def __init__(self,
vocab_size=50265,
units=768,
hidden_size=3072,
num_layers=12,
num_heads=12,
max_length=512,
hidden_dropout_prob=0.1,
attention_dropout_prob=0.1,
pos_embed_type='learned',
activation='gelu',
pooler_activation='tanh',
layer_norm_eps=1E-5,
embed_initializer=TruncNorm(stdev=0.02),
weight_initializer=TruncNorm(stdev=0.02),
bias_initializer='zeros',
dtype='float32',
use_pooler=True,
classifier_activation=False,
encoder_normalize_before=True,
output_all_encodings=False,
layout='NT',
compute_layout='auto'):
"""
Parameters
----------
vocab_size
units
hidden_size
num_layers
num_heads
max_length
hidden_dropout_prob
attention_dropout_prob
pos_embed_type
activation
pooler_activation
layer_norm_eps
embed_initializer
weight_initializer
bias_initializer
dtype
use_pooler
Whether to output the CLS hidden state
classifier_activation
Whether to use classification head
encoder_normalize_before
            Whether to apply layer normalization to the embedding before feeding it into the encoder
output_all_encodings
Whether to output all encodings
layout
The layout
compute_layout
The computation layout
"""
super().__init__()
self._dtype = dtype
self._output_all_encodings = output_all_encodings
self.vocab_size = vocab_size
self.units = units
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.max_length = max_length
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.pos_embed_type = pos_embed_type
self.activation = activation
self.pooler_activation = pooler_activation
self.layer_norm_eps = layer_norm_eps
self.use_pooler = use_pooler
self.classifier_activation = classifier_activation
self.encoder_normalize_before = encoder_normalize_before
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self._layout = layout
if compute_layout == 'auto' or compute_layout is None:
self._compute_layout = layout
else:
self._compute_layout = compute_layout
self.word_embed = nn.Embedding(
input_dim=self.vocab_size,
output_dim=self.units,
weight_initializer=embed_initializer,
dtype=self._dtype
)
if self.encoder_normalize_before:
self.embed_ln = nn.LayerNorm(
epsilon=self.layer_norm_eps,
in_channels=self.units)
self.embed_dropout = nn.Dropout(self.hidden_dropout_prob)
self.pos_embed = PositionalEmbedding(
units=self.units,
max_length=self.max_length,
dtype=self._dtype,
method=pos_embed_type)
self.encoder = RobertaEncoder(
units=self.units,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
num_heads=self.num_heads,
attention_dropout_prob=self.attention_dropout_prob,
hidden_dropout_prob=self.hidden_dropout_prob,
layer_norm_eps=self.layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=self.activation,
dtype=self._dtype,
output_all_encodings=self._output_all_encodings,
layout=self._compute_layout,
)
if self.use_pooler and self.classifier_activation:
# Construct pooler
self.pooler = nn.Dense(units=self.units,
in_units=self.units,
flatten=False,
activation=self.pooler_activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
|
Parameters
----------
vocab_size
units
hidden_size
num_layers
num_heads
max_length
hidden_dropout_prob
attention_dropout_prob
pos_embed_type
activation
pooler_activation
layer_norm_eps
embed_initializer
weight_initializer
bias_initializer
dtype
use_pooler
Whether to output the CLS hidden state
classifier_activation
Whether to use classification head
encoder_normalize_before
    Whether to apply layer normalization to the embedding before feeding it into the encoder
output_all_encodings
Whether to output all encodings
layout
The layout
compute_layout
The computation layout
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/roberta.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
|
Apache-2.0
|
def get_initial_embedding(self, inputs):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
"""
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
else:
batch_axis, time_axis = 1, 0
embedding = self.word_embed(inputs)
if self.pos_embed_type:
positional_embedding = self.pos_embed(npx.arange_like(inputs, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
if self.encoder_normalize_before:
embedding = self.embed_ln(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings, taking the positional embeddings into account.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/roberta.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
|
Apache-2.0
|
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
        This is used for pre-training or fine-tuning a RoBERTa model.
        Get the first token of the whole sequence, which is [CLS]
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
ret
Shape (batch_size, units)
"""
if self._layout == 'NT':
outputs = sequence[:, 0, :]
elif self._layout == 'TN':
outputs = sequence[0, :, :]
else:
raise NotImplementedError
if self.classifier_activation:
return self.pooler(outputs)
else:
return outputs
|
Generate the representation given the inputs.
This is used for pre-training or fine-tuning a RoBERTa model.
Get the first token of the whole sequence, which is [CLS]
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
ret
Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/roberta.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
|
Apache-2.0
|
def forward(self, inputs, valid_length, masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
all_encodings_outputs, pooled_out = self.backbone_model(inputs, valid_length)
if self.backbone_model._output_all_encodings:
contextual_embeddings = all_encodings_outputs[-1]
else:
contextual_embeddings = all_encodings_outputs
if self.backbone_model.layout == 'TN':
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return all_encodings_outputs, pooled_out, mlm_scores
|
Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/roberta.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
|
Apache-2.0
|
def get_pretrained_roberta(model_name: str = 'fairseq_roberta_base',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False) \
-> Tuple[CN, HuggingFaceByteBPETokenizer, str, str]:
"""Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the RoBERTa model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_roberta())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
merges_path = PRETRAINED_URL[model_name]['merges']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path), ('merges', merges_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceByteBPETokenizer(
merges_file=local_paths['merges'],
vocab_file=local_paths['vocab'],
lowercase=do_lower)
if cfg is None:
cfg = RobertaModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path
|
Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the RoBERTa model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
|
get_pretrained_roberta
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/roberta.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py
|
Apache-2.0
|
def __init__(
self,
d_model,
d_kv,
d_ff,
is_decoder,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
is_decoder
If is_decoder, apply cross-attention.
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
            A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._is_decoder = is_decoder
self._num_heads = num_heads
self._inner_dim = self._num_heads * self._d_kv
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
self.self_attn_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
# avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
self.self_attn_q = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal((d_model * d_kv) ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn_k = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn_v = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn = MultiHeadAttentionCell(
query_units=self._inner_dim,
num_heads=num_heads,
attention_dropout=dropout_prob,
scaled=False,
normalized=False,
dtype=dtype,
layout='NTK' if layout == 'NT' else 'TNK',
use_einsum=False
)
self.self_attn_proj = nn.Dense(
units=d_model,
in_units=self._inner_dim,
flatten=False,
use_bias=False,
weight_initializer=Normal(self._inner_dim ** -0.5 * init_factor),
dtype=dtype
)
if is_decoder:
self.cross_attn_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
# avoid scaling before softmax
self.cross_attn_q = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal((d_model * d_kv) ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn_k = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn_v = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn = MultiHeadAttentionCell(
query_units=self._inner_dim,
num_heads=num_heads,
attention_dropout=dropout_prob,
scaled=False,
normalized=False,
dtype=dtype,
layout='NTK' if layout == 'NT' else 'TNK',
use_einsum=False
)
self.cross_attn_proj = nn.Dense(
units=d_model,
in_units=self._inner_dim,
flatten=False,
use_bias=False,
weight_initializer=Normal(self._inner_dim ** -0.5 * init_factor),
dtype=dtype
)
assert activation in ['relu', 'gated-gelu'], \
'{} is not supported. Please choose from "relu" and "gated-gelu"'.format(activation)
# the weight_initializer here is equivalent to Normal(in_units ** -0.5 * init_factor)
self.ffn = PositionwiseFFN(
units=d_model,
hidden_size=d_ff,
use_bias=False,
activation_dropout=dropout_prob,
dropout=dropout_prob,
weight_initializer=Xavier('gaussian', 'in', np.sqrt(init_factor)),
activation='relu' if activation == 'relu' else 'gelu(tanh)',
use_gated_activation=False if activation == 'relu' else True,
normalization='rms_norm',
layer_norm_eps=layer_norm_eps,
pre_norm=True,
dtype=dtype,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor)
)
self.dropout = nn.Dropout(dropout_prob)
|
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
is_decoder
If is_decoder, apply cross-attention.
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
    A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def incremental_decode(
self,
step_hidden_states,
step_position_embeddings,
past_key_value,
mem_states,
step_mem_attn_mask
):
"""Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
step_position_embeddings
Stepwise relative position embeddings.
Shape (num_heads, 1, (L_past_seq + 1))
past_key_value
A tuple containing past key and past value. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, L_past_seq, num_heads, d_kv)
- layout = 'TN'
Shape (L_past_seq, B, num_heads, d_kv)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
step_mem_attn_mask
Stepwise attention mask for cross-attention.
Shape (B, 1, L_src_seq)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
(self_key, self_value)
The updated `past_key_value` tuple. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, (L_past_seq + 1), num_heads, d_kv)
- layout = 'TN'
Shape ((L_past_seq + 1), B, num_heads, d_kv)
"""
# 1. self-attention
out = self.self_attn_layer_norm(step_hidden_states)
step_self_query, step_self_key, step_self_value = (
self.transpose_for_scores(self.self_attn_q(out)),
self.transpose_for_scores(self.self_attn_k(out)),
self.transpose_for_scores(self.self_attn_v(out))
)
self_key, self_value = (
np.concatenate([past_key_value[0], step_self_key], axis=self._time_axis),
np.concatenate([past_key_value[1], step_self_value], axis=self._time_axis)
)
out, _ = self.self_attn(
step_self_query,
self_key,
self_value,
None,
step_position_embeddings
)
out = self.dropout(self.self_attn_proj(out))
step_hidden_states = step_hidden_states + out
# 2. cross-attention
out = self.cross_attn_layer_norm(step_hidden_states)
step_cross_query, cross_key, cross_value = (
self.transpose_for_scores(self.cross_attn_q(out)),
self.transpose_for_scores(self.cross_attn_k(mem_states)),
self.transpose_for_scores(self.cross_attn_v(mem_states))
)
out, _ = self.cross_attn(
step_cross_query,
cross_key,
cross_value,
step_mem_attn_mask
)
out = self.dropout(self.cross_attn_proj(out))
step_hidden_states = step_hidden_states + out
# 3. feed forward
step_hidden_states = self.ffn(step_hidden_states)
return step_hidden_states, (self_key, self_value)
|
Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
step_position_embeddings
Stepwise relative position embeddings.
Shape (num_heads, 1, (L_past_seq + 1))
past_key_value
A tuple containing past key and past value. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, L_past_seq, num_heads, d_kv)
- layout = 'TN'
Shape (L_past_seq, B, num_heads, d_kv)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
step_mem_attn_mask
Stepwise attention mask for cross-attention.
Shape (B, 1, L_src_seq)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
(self_key, self_value)
The updated `past_key_value` tuple. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, (L_past_seq + 1), num_heads, d_kv)
- layout = 'TN'
Shape ((L_past_seq + 1), B, num_heads, d_kv)
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def forward(
self,
hidden_states,
self_attn_mask,
position_embeddings,
mem_states=None,
mem_attn_mask=None
):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
self_attn_mask
if is_decoder, it should be a "causal" attention mask.
Shape (B, L_seq, L_seq)
position_embeddings
            Relative position embeddings for self-attention; cross-attention does not use position encodings.
Shape (num_heads, L_seq, L_seq)
mem_states
Encoded results. Only applicable to decoder layers.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_attn_mask
            Attention mask for cross-attention. Only applicable to decoder layers.
Shape (B, L_seq, L_src_seq)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. self-attention
out = self.self_attn_layer_norm(hidden_states)
self_query, self_key, self_value = (
self.transpose_for_scores(self.self_attn_q(out)),
self.transpose_for_scores(self.self_attn_k(out)),
self.transpose_for_scores(self.self_attn_v(out))
)
out, _ = self.self_attn(
self_query,
self_key,
self_value,
self_attn_mask,
position_embeddings
)
out = self.dropout(self.self_attn_proj(out))
hidden_states = hidden_states + out
# 2. cross-attention, if needed
if self._is_decoder:
out = self.cross_attn_layer_norm(hidden_states)
cross_query, cross_key, cross_value = (
self.transpose_for_scores(self.cross_attn_q(out)),
self.transpose_for_scores(self.cross_attn_k(mem_states)),
self.transpose_for_scores(self.cross_attn_v(mem_states))
)
out, _ = self.cross_attn(
cross_query,
cross_key,
cross_value,
mem_attn_mask
)
out = self.dropout(self.cross_attn_proj(out))
hidden_states = hidden_states + out
# 3. feed forward
hidden_states = self.ffn(hidden_states)
return hidden_states
|
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
self_attn_mask
if is_decoder, it should be a "causal" attention mask.
Shape (B, L_seq, L_seq)
position_embeddings
    Relative position embeddings for self-attention; cross-attention does not use position encodings.
Shape (num_heads, L_seq, L_seq)
mem_states
Encoded results. Only applicable to decoder layers.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_attn_mask
    Attention mask for cross-attention. Only applicable to decoder layers.
Shape (B, L_seq, L_src_seq)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def __init__(
self,
d_model,
d_kv,
d_ff,
num_layers=12,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
            A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._num_layers = num_layers
self._num_heads = num_heads
self._inner_dim = num_heads * d_kv
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
self.relative_position_encoder = RelAttentionScoreCell(
query_units=self._inner_dim,
num_heads=num_heads,
method='t5',
bidirectional=True,
embed_initializer=Normal(d_model ** -0.5 * init_factor),
layout='NTK' if layout == 'NT' else 'TNK',
dtype=dtype
)
self.layers = nn.HybridSequential()
for _ in range(num_layers):
self.layers.add(
T5Block(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
is_decoder=False,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
)
self.final_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
self.dropout = nn.Dropout(dropout_prob)
|
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
    A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def forward(self, hidden_states, valid_length):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
            Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. relative position embeddings and attention masks
position_embeddings = self.relative_position_encoder(
gen_rel_position(hidden_states, layout=self.layout)
)
self_attn_mask = gen_self_attn_mask(
hidden_states,
valid_length,
dtype=self._dtype,
attn_type='full',
layout=self.layout
)
# 2. encoder blocks and other layers
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
hidden_states = layer(
hidden_states,
self_attn_mask,
position_embeddings
)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
    Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def incremental_decode(
self,
step_hidden_states,
position,
past_key_values,
mem_states,
mem_valid_length
):
"""Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
position
Current position index in incremental decoding.
Shape (B,)
past_key_values
A list of tuples where each one corresponds to the `past_key_value` of a decoder layer.
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
            Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
present_key_values
A list of tuples containing the updated `past_key_value` for each decoder layer.
"""
# 1. relative position embeddings and attention mask
# step_position_embeddings: Shape (num_heads, 1, L_seq), for self-attention
# step_mem_attn_mask: Shape (B, 1, L_src_seq), for cross-attention
position_embeddings = self.relative_position_encoder(
gen_rel_position(
step_hidden_states,
past_data=past_key_values[0][0],
layout=self.layout
)
)
step_position_embeddings = position_embeddings[:, -1:, :]
step_mem_attn_mask = gen_mem_attn_mask(
mem_states,
mem_valid_length,
step_hidden_states,
dtype=self._dtype,
layout=self.layout
)
# 2. decoder blocks and other layers
step_hidden_states = self.dropout(step_hidden_states)
present_key_values = []
for i, layer in enumerate(self.layers):
step_hidden_states, present_key_value = layer.incremental_decode(
step_hidden_states,
step_position_embeddings,
past_key_values[i],
mem_states,
step_mem_attn_mask
)
present_key_values.append(present_key_value)
step_hidden_states = self.final_layer_norm(step_hidden_states)
step_hidden_states = self.dropout(step_hidden_states)
return step_hidden_states, present_key_values
|
Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
position
Current position index in incremental decoding.
Shape (B,)
past_key_values
A list of tuples where each one corresponds to the `past_key_value` of a decoder layer.
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
    Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
present_key_values
A list of tuples containing the updated `past_key_value` for each decoder layer.
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def forward(self, hidden_states, valid_length, mem_states, mem_valid_length):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
            Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
            Valid sequence length for each sample fed into the encoder (mem_valid_length = src_valid_length).
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. relative position embeddings and attention masks
# position_embeddings: Shape (num_heads, L_seq, L_seq), broadcastable, for self-attention
# self_attn_mask: Shape (B, L_seq, L_seq), for self-attention
# mem_attn_mask: Shape (B, L_seq, L_src_seq), for cross-attention
position_embeddings = self.relative_position_encoder(
gen_rel_position(hidden_states, layout=self.layout)
)
self_attn_mask = gen_self_attn_mask(
hidden_states,
valid_length,
dtype=self._dtype,
attn_type='causal',
layout=self.layout
)
mem_attn_mask = gen_mem_attn_mask(
mem_states,
mem_valid_length,
hidden_states,
valid_length,
dtype=self._dtype,
layout=self.layout
)
# 2. decoder blocks and other layers
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
hidden_states = layer(
hidden_states,
self_attn_mask,
position_embeddings,
mem_states,
mem_attn_mask
)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
    Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
    Valid sequence length for each sample fed into the encoder (mem_valid_length = src_valid_length).
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def __init__(
self,
vocab_size=32128,
d_model=768,
d_kv=64,
d_ff=3072,
num_layers=12,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
vocab_size
vocab_size should be no smaller than len(tokenizer._sp_model).
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
            A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
assert vocab_size > 0, 'Vocab size {} is not valid.'.format(vocab_size)
self._vocab_size = vocab_size
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._num_layers = num_layers
self._num_heads = num_heads
self._inner_dim = num_heads * d_kv
self._activation = activation
self._init_factor = init_factor
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
        # input embedding weights are shared across the encoder and the decoder
self.input_embedding_layer = nn.Embedding(
input_dim=vocab_size,
output_dim=d_model,
weight_initializer=Normal(1.0 * init_factor),
dtype=dtype
)
self.encoder = T5Encoder(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
self.decoder = T5Decoder(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
|
Parameters
----------
vocab_size
vocab_size should be no smaller than len(tokenizer._sp_model).
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
    A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def encode(self, src_data, src_valid_length):
"""Encode the source data to memory states.
Parameters
----------
src_data
            Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
            Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
"""
src_hidden_states = self.input_embedding_layer(src_data)
enc_out = self.encoder(
src_hidden_states,
src_valid_length
)
return enc_out
|
Encode the source data to memory states.
Parameters
----------
src_data
    Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
    Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
|
encode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def decode(self, tgt_data, tgt_valid_length, mem_states, mem_valid_length):
"""Decode based on target data and memory states.
Parameters
----------
tgt_data
            Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
            Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
            Valid sequence length for each sample fed into the encoder (mem_valid_length = src_valid_length).
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
tgt_hidden_states = self.input_embedding_layer(tgt_data)
dec_out = self.decoder(
tgt_hidden_states,
tgt_valid_length,
mem_states,
mem_valid_length
)
return dec_out
|
Decode based on target data and memory states.
Parameters
----------
tgt_data
    Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
    Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
    Valid sequence length for each sample fed into the encoder (mem_valid_length = src_valid_length).
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
|
decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length):
"""
Parameters
----------
src_data
            Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
            Valid sequence length for each sample fed into the encoder.
Shape (B,)
tgt_data
            Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
            Valid sequence length for each sample fed into the decoder.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
enc_out = self.encode(src_data, src_valid_length)
dec_out = self.decode(tgt_data, tgt_valid_length, enc_out, src_valid_length)
return dec_out
|
Parameters
----------
src_data
    Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
    Valid sequence length for each sample fed into the encoder.
Shape (B,)
tgt_data
    Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
    Valid sequence length for each sample fed into the decoder.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def state_batch_axis(self):
"""The returned 4-tuple corresponds to the batch axes of `init_states()` results.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axes
"""
if self.model.layout == 'NT':
return 0, 0, 0, self.model.decoder.state_batch_axis
else:
return 1, 0, 0, self.model.decoder.state_batch_axis
|
The returned 4-tuple corresponds to the batch axes of `init_states()` results.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axes
|
state_batch_axis
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def init_states(self, src_data, src_valid_length):
"""Initialize the states required for incremental decoding.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
Encoded results from src_data.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
src_valid_length
Shape (B,)
position
Shape (B,)
dec_states
A list of `past_key_value` for incremental decoding.
"""
batch_size = src_data.shape[1 - self.model._time_axis] # NT: 0; TN: 1
ctx = src_data.ctx
enc_out = self.model.encode(src_data, src_valid_length)
position = np.zeros((batch_size,), dtype=np.int32, ctx=ctx)
key_values = self.model.decoder._init_key_values(batch_size, ctx, dtype=enc_out.dtype)
return enc_out, src_valid_length, position, key_values
|
Initialize the states required for incremental decoding.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
Encoded results from src_data.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
src_valid_length
Shape (B,)
position
Shape (B,)
dec_states
A list of `past_key_value` for incremental decoding.
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
def forward(self, step_data, past_states):
"""
Parameters
----------
step_data
Stepwise batched token ids for incremental decoding.
Shape (B,)
past_states
A 4-tuple containing the states of the last incremental decoding step.
1. mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
2. mem_valid_length
Shape (B,)
3. position
Shape (B,)
4. dec_states
A list of `past_key_value` tuples whose shapes depend on the layout.
Returns
-------
step_hidden_states
Stepwise hidden states with time axis squeezed out.
Shape (B, vocab_size)
new_states
Similar to past_states, but updated for the next incremental decoding step.
"""
mem_states, mem_valid_length, position, past_key_values = past_states
step_hidden_states = self.model.input_embedding_layer(step_data)
# NT: (B, d_model) -> (B, 1, d_model); TN: (B, d_model) -> (1, B, d_model)
step_hidden_states = np.expand_dims(step_hidden_states, axis=self.model._time_axis)
step_hidden_states, present_key_values = self.model.decoder.incremental_decode(
step_hidden_states,
position,
past_key_values,
mem_states,
mem_valid_length
)
step_hidden_states = self.output_layer(step_hidden_states)
# NT: (B, 1, vocab_size) -> (B, vocab_size); TN: (1, B, vocab_size) -> (B, vocab_size)
step_hidden_states = npx.reshape(step_hidden_states, (-5, -1))
return step_hidden_states, (mem_states, mem_valid_length, position + 1, present_key_values)
|
Parameters
----------
step_data
Stepwise batched token ids for incremental decoding.
Shape (B,)
past_states
A 4-tuple containing the states of the last incremental decoding step.
1. mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
2. mem_valid_length
Shape (B,)
3. position
Shape (B,)
4. dec_states
A list of `past_key_value` tuples whose shapes depend on the layout.
Returns
-------
step_hidden_states
Stepwise hidden states with time axis squeezed out.
Shape (B, vocab_size)
new_states
Similar to past_states, but updated for the next incremental decoding step.
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/t5.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py
|
Apache-2.0
|
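The stepwise interface in the record above (an `init_states` that encodes the source, plus a `forward` that consumes one token per call and returns per-step logits of shape (B, vocab_size) together with updated states) is what a search routine drives. The following is a minimal greedy-decoding sketch, using a dummy stand-in object with that same interface rather than the actual T5 inference class, so the loop structure can run on its own; the `bos_id`/`eos_id` values are hypothetical.
import numpy as np

class _DummyStepper:
    # Hypothetical stand-in mirroring the interface documented above:
    # init_states(src_data, src_valid_length) -> states
    # forward(step_data, states) -> (logits of shape (B, vocab_size), new_states)
    vocab_size = 8
    def init_states(self, src_data, src_valid_length):
        return {'position': np.zeros(src_data.shape[0], dtype=np.int32)}
    def forward(self, step_data, states):
        batch_size = step_data.shape[0]
        logits = np.random.randn(batch_size, self.vocab_size)
        return logits, {'position': states['position'] + 1}

def greedy_decode(stepper, src_data, src_valid_length, bos_id=1, eos_id=2, max_len=16):
    # Drive the stepwise decoder greedily: feed back the argmax token each step.
    states = stepper.init_states(src_data, src_valid_length)
    step_data = np.full((src_data.shape[0],), bos_id, dtype=np.int32)
    outputs = []
    for _ in range(max_len):
        logits, states = stepper.forward(step_data, states)   # (B, vocab_size)
        step_data = logits.argmax(axis=-1).astype(np.int32)   # (B,)
        outputs.append(step_data)
        if np.all(step_data == eos_id):
            break
    return np.stack(outputs, axis=1)                           # (B, L_out)

tokens = greedy_decode(_DummyStepper(), np.zeros((2, 5), dtype=np.int32), np.array([5, 3]))
print(tokens.shape)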
def transformer_base():
"""Configuration of Transformer WMT EN-DE Base"""
cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.src_vocab_size = -1
cfg.MODEL.tgt_vocab_size = -1
cfg.MODEL.max_src_length = -1
cfg.MODEL.max_tgt_length = -1
cfg.MODEL.scale_embed = True
cfg.MODEL.pos_embed_type = "sinusoidal"
cfg.MODEL.shared_embed = True
cfg.MODEL.tie_weights = True
cfg.MODEL.attention_dropout = 0.0
cfg.MODEL.activation_dropout = 0.0
cfg.MODEL.dropout = 0.1
cfg.MODEL.layout = 'NT'
cfg.MODEL.dtype = 'float32'
# Parameters for the encoder
cfg.MODEL.ENCODER = CN()
cfg.MODEL.ENCODER.num_layers = 6
cfg.MODEL.ENCODER.units = 512
cfg.MODEL.ENCODER.num_heads = 8
cfg.MODEL.ENCODER.hidden_size = 2048
cfg.MODEL.ENCODER.recurrent = False
cfg.MODEL.ENCODER.activation = 'relu'
cfg.MODEL.ENCODER.pre_norm = False
cfg.MODEL.ENCODER.use_qkv_bias = True
# Parameters for the decoder
cfg.MODEL.DECODER = CN()
cfg.MODEL.DECODER.num_layers = 6
cfg.MODEL.DECODER.units = 512
cfg.MODEL.DECODER.num_heads = 8
cfg.MODEL.DECODER.hidden_size = 2048
cfg.MODEL.DECODER.recurrent = False
cfg.MODEL.DECODER.activation = 'relu'
cfg.MODEL.DECODER.pre_norm = False
cfg.MODEL.DECODER.use_qkv_bias = False
# Parameters for the initializer
cfg.INITIALIZER = CN()
cfg.INITIALIZER.embed = ['xavier', 'gaussian', 'in', 1.0]
cfg.INITIALIZER.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.INITIALIZER.bias = ['zeros']
cfg.VERSION = 1
cfg.freeze()
return cfg
|
Configuration of Transformer WMT EN-DE Base
|
transformer_base
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
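The config returned by transformer_base() is frozen, so task-specific settings such as the vocabulary sizes have to be filled in on a defrosted copy before a model is built from it. A minimal usage sketch, assuming `CN` behaves like a yacs-style CfgNode with clone/defrost/freeze (the pattern the function above suggests); the override values are hypothetical.
# Hypothetical usage sketch of the config above.
cfg = transformer_base().clone()
cfg.defrost()
cfg.MODEL.src_vocab_size = 32000    # must be set before building a model from the config
cfg.MODEL.tgt_vocab_size = 32000
cfg.MODEL.ENCODER.pre_norm = True   # e.g. switch both stacks to pre-norm
cfg.MODEL.DECODER.pre_norm = True
cfg.freeze()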
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
num_heads: int = 8,
attention_dropout_prob: float = 0.1,
hidden_dropout_prob: float = 0.1,
activation_dropout_prob: float = 0.0,
layer_norm_eps: float = 1e-12,
pre_norm: bool = False,
use_qkv_bias: bool = True,
weight_initializer: Optional[InitializerType] = None,
bias_initializer: Optional[InitializerType] = 'zeros',
activation: str = 'relu',
dtype='float32',
layout='NT'):
"""
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before the attention layer
If pre_norm:
norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
weight_initializer
bias_initializer
activation
dtype
layout
"""
super().__init__()
self._units = units
self._hidden_size = hidden_size
self._num_heads = num_heads
self._attention_dropout_prob = attention_dropout_prob
self._hidden_dropout_prob = hidden_dropout_prob
self._activation_dropout_prob = activation_dropout_prob
self._pre_norm = pre_norm
self._dtype = dtype
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
assert self._units % self._num_heads == 0, 'units must be divisible by the number of heads'
self.dropout_layer = nn.Dropout(hidden_dropout_prob)
self.attn_qkv = nn.Dense(3 * units,
flatten=False,
use_bias=use_qkv_bias,
in_units=units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.attention_proj = nn.Dense(units=units,
flatten=False,
in_units=units,
use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
attention_layout = 'NTK' if self._layout == 'NT' else 'TNK'
self.attention_cell = \
MultiHeadAttentionCell(
query_units=self._units,
num_heads=self._num_heads,
attention_dropout=self._attention_dropout_prob,
scaled=True,
dtype=self._dtype,
layout=attention_layout
)
self.layer_norm = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ffn = PositionwiseFFN(units=units,
hidden_size=hidden_size,
dropout=hidden_dropout_prob,
activation_dropout=activation_dropout_prob,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
layer_norm_eps=layer_norm_eps,
activation=activation,
pre_norm=pre_norm,
dtype=self._dtype)
|
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before the attention layer
If pre_norm:
norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
weight_initializer
bias_initializer
activation
dtype
layout
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
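The `pre_norm` flag in the layer above only changes where the layer norm sits relative to the residual connection, as the small diagram in the docstring indicates. A NumPy sketch of the two orderings for one residual sublayer, using stand-in callables rather than the gluon-nlp attention/FFN blocks:
import numpy as np

def sublayer(data, fn, layer_norm, pre_norm):
    # Apply one residual sublayer (attention or FFN) with pre- or post-norm.
    if pre_norm:
        # norm(data) -> fn -> residual(+data); a final norm is applied once at the
        # end of the whole stack (see `ln_final` in the encoder record below).
        return data + fn(layer_norm(data))
    # data -> fn -> norm(residual(+data))
    return layer_norm(data + fn(data))

layer_norm = lambda x: (x - x.mean(-1, keepdims=True)) / (x.std(-1, keepdims=True) + 1e-5)
attn = lambda x: 0.5 * x           # stand-in for self-attention + output projection
data = np.random.randn(2, 4, 8)    # (batch_size, seq_length, units), layout='NT'
out_pre = sublayer(data, attn, layer_norm, pre_norm=True)
out_post = sublayer(data, attn, layer_norm, pre_norm=False)
print(out_pre.shape, out_post.shape)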
def forward(self, data, attn_mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
"""
if self._pre_norm:
data = self.layer_norm(data)
query, key, value = np.split(self.attn_qkv(data), 3, axis=-1)
query = npx.reshape(query, (-2, -2, self._num_heads, -1))
key = npx.reshape(key, (-2, -2, self._num_heads, -1))
value = npx.reshape(value, (-2, -2, self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, attn_mask)
out = self.attention_proj(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.layer_norm(out)
out = self.ffn(out)
return out, attn_weight
|
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
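The forward pass above projects the input once to 3 * units, splits the result into query/key/value, and folds the channel dimension into (num_heads, head_dim) so attention runs per head (the 'NTK' layout). A NumPy sketch of just that reshaping, with hypothetical sizes:
import numpy as np

batch_size, seq_length, units, num_heads = 2, 5, 8, 4
head_dim = units // num_heads
qkv = np.random.randn(batch_size, seq_length, 3 * units)     # output of attn_qkv, layout='NT'

query, key, value = np.split(qkv, 3, axis=-1)                 # each (B, T, units)
# Fold channels into heads: (B, T, units) -> (B, T, num_heads, head_dim), i.e. 'NTK'.
query = query.reshape(batch_size, seq_length, num_heads, head_dim)
key = key.reshape(batch_size, seq_length, num_heads, head_dim)
value = value.reshape(batch_size, seq_length, num_heads, head_dim)

# Per-head scaled dot-product scores: (B, num_heads, T, T)
scores = np.einsum('bqhd,bkhd->bhqk', query, key) / np.sqrt(head_dim)
print(scores.shape)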
def __init__(self, num_layers=6, recurrent=False,
units=512, hidden_size=2048, num_heads=8,
activation_dropout=0.0, dropout=0.1, use_qkv_bias=True,
attention_dropout=0.1, layer_norm_eps=1E-5, data_norm=False,
pre_norm=False, weight_initializer=None, bias_initializer='zeros',
activation='relu', dtype='float32', layout='NT'):
"""
Parameters
----------
num_layers :
The number of layers
recurrent : bool
Whether the layers share weights or not
units
hidden_size
num_heads
dropout
layer_norm_eps
data_norm
Whether to apply LayerNorm to the data
pre_norm
Whether to apply LayerNorm before the attention layer.
weight_initializer
bias_initializer
activation
dtype
layout
"""
super().__init__()
self._dtype = dtype
self.num_layers = num_layers
self._recurrent = recurrent
self._data_norm = data_norm
self._pre_norm = pre_norm
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
self.dropout_layer = nn.Dropout(dropout)
if self._pre_norm:
self.ln_final = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
if self._data_norm:
self.ln_data = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
# Construct the intermediate layers
self.layers = nn.HybridSequential()
real_num_layers = 1 if recurrent else num_layers
for i in range(real_num_layers):
self.layers.add(TransformerEncoderLayer(
units=units,
hidden_size=hidden_size,
num_heads=num_heads,
hidden_dropout_prob=dropout,
attention_dropout_prob=attention_dropout,
activation_dropout_prob=activation_dropout,
use_qkv_bias=use_qkv_bias,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
pre_norm=pre_norm,
activation=activation,
layout=self._layout,
dtype=dtype))
|
Parameters
----------
num_layers :
The number of layers
recurrent : bool
Whether the layers share weights or not
units
hidden_size
num_heads
dropout
layer_norm_eps
data_norm
Whether to apply LayerNorm to the data
pre_norm
Whether to apply LayerNorm before the attention layer.
weight_initializer
bias_initializer
activation
dtype
layout
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def forward(self, data, valid_length):
"""
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
layout=self.layout,
attn_type='full')
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
for i in range(self.num_layers):
if self._recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, _ = layer(out, attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out
|
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
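gen_self_attn_mask with attn_type='full' (used in the encoder forward above) yields a (batch_size, seq_length, seq_length) mask in which every position inside a sample's valid length may attend to every other valid position. A NumPy sketch of that broadcasting; this is my reading of the 'full' mask, not the gluon-nlp implementation itself:
import numpy as np

def full_self_attn_mask(seq_length, valid_length):
    # mask[b, i, j] == 1 iff both i and j lie within valid_length[b].
    steps = np.arange(seq_length)                                       # (T,)
    valid = steps[None, :] < valid_length[:, None]                      # (B, T)
    return (valid[:, :, None] & valid[:, None, :]).astype(np.float32)   # (B, T, T)

mask = full_self_attn_mask(5, np.array([5, 3]))
print(mask[1])   # rows/columns beyond position 3 are masked out for the second sample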
def __init__(self, units: int = 512,
mem_units: Optional[int] = None,
hidden_size: int = 2048,
num_heads: int = 8,
activation_dropout: float = 0.0,
dropout: float = 0.1,
attention_dropout: float = 0.1,
layer_norm_eps: float = 1E-5,
activation: str = 'relu',
pre_norm: bool = False,
use_qkv_bias: bool = True,
weight_initializer=None,
bias_initializer='zeros',
dtype='float32',
layout='NT'):
"""
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
weight_initializer
bias_initializer
dtype
Data type
layout
Layout of the input
"""
super().__init__()
self._dtype = dtype
self._units = units
if mem_units is None:
mem_units = units
self._mem_units = mem_units
self._pre_norm = pre_norm
self._num_heads = num_heads
self._attention_dropout = attention_dropout
self._dtype = dtype
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
attention_layout = 'NTK' if layout == 'NT' else 'TNK'
self.dropout_layer = nn.Dropout(dropout)
if units % num_heads:
raise ValueError('In Transformer, units should be divisible by the number of '
'heads. Received units={}, num_heads={}'.format(units, num_heads))
self.attn_in_qkv = nn.Dense(3 * units, in_units=units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.self_attention = MultiHeadAttentionCell(query_units=units,
num_heads=num_heads,
attention_dropout=self._attention_dropout,
dtype=dtype,
layout=attention_layout)
self.proj_in = nn.Dense(units=units, in_units=units, flatten=False, use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_q = nn.Dense(units,
in_units=units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_k = nn.Dense(units, in_units=mem_units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_v = nn.Dense(units, in_units=mem_units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.inter_attention = MultiHeadAttentionCell(query_units=units,
num_heads=num_heads,
attention_dropout=self._attention_dropout,
dtype=dtype,
layout=attention_layout)
self.proj_inter = nn.Dense(units=units, in_units=units,
flatten=False, use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
# TODO(sxjscience) Add DType to LayerNorm
self.ln_in = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ln_inter = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ffn = PositionwiseFFN(units=units,
hidden_size=hidden_size,
dropout=dropout,
activation_dropout=activation_dropout,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
layer_norm_eps=layer_norm_eps,
activation=activation,
pre_norm=pre_norm,
dtype=dtype)
|
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
weight_initializer
bias_initializer
dtype
Data type
layout
Layout of the input
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def forward(self, data, mem, self_causal_mask, mem_attn_mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] masks the elements that token `j` attends to.
To understand the self-causal attention mask, we can look at the following example:
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
.. code-block:: none
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Get the causal self-attention value
if self._pre_norm:
data = self.ln_in(data)
self_query, self_key, self_value = np.split(self.attn_in_qkv(data), 3, axis=-1)
out, [_, self_attn_weight] = self.self_attention(
npx.reshape(self_query, (-2, -2, self._num_heads, -1)),
npx.reshape(self_key, (-2, -2, self._num_heads, -1)),
npx.reshape(self_value, (-2, -2, self._num_heads, -1)),
self_causal_mask)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
if self._pre_norm:
data = self.ln_inter(data)
out, [_, context_attn_weight] = self.inter_attention(
npx.reshape(self.attn_inter_q(data), (-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_k(mem), (-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_v(mem), (-2, -2, self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
return out
|
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] masks the elements that token `j` attends to.
To understand the self-causal attention mask, we can look at the following example:
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
.. code-block:: none
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
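The two masks documented above (the triangular causal mask over the decoder input and the memory mask that exposes the valid encoder positions to every decoder step) can be built from valid lengths by simple broadcasting. A NumPy sketch of that construction, written as a plain reimplementation rather than the gen_self_attn_mask/gen_mem_attn_mask helpers:
import numpy as np

def causal_mask(seq_length, valid_length):
    # mask[b, j, k] == 1 iff token j may attend to token k (k <= j and k within valid_length[b]).
    steps = np.arange(seq_length)
    causal = steps[None, :] <= steps[:, None]                           # (T, T), lower triangular
    key_valid = steps[None, None, :] < valid_length[:, None, None]      # (B, 1, T)
    return (causal[None, :, :] & key_valid).astype(np.float32)          # (B, T, T)

def mem_attn_mask(seq_length, mem_length, mem_valid_length):
    # mask[b, j, k] == 1 iff decoder token j may attend to memory slot k.
    mem_steps = np.arange(mem_length)
    valid = mem_steps[None, :] < mem_valid_length[:, None]              # (B, mem_length)
    shape = (len(mem_valid_length), seq_length, mem_length)
    return np.broadcast_to(valid[:, None, :], shape).astype(np.float32)

print(causal_mask(4, np.array([4]))[0])       # same triangular pattern as the table above
print(mem_attn_mask(2, 4, np.array([3]))[0])  # each decoder step sees the first 3 memory slots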
def init_states(self, batch_size, ctx, dtype='float32'):
"""Initialize the states required for incremental decoding
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
if self.layout == 'NT':
init_key = mx.np.zeros(shape=(batch_size, 0, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
init_value = mx.np.zeros(shape=(batch_size, 0, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
else:
init_key = mx.np.zeros(shape=(0, batch_size, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
init_value = mx.np.zeros(shape=(0, batch_size, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
return init_key, init_value
|
Initialize the states required for incremental decoding
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, containing
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
"""
if self._pre_norm:
data = self.ln_in(data)
if self.layout == 'NT':
time_axis = 1
else:
time_axis = 0
data = np.expand_dims(data, axis=time_axis)
# Shape (B, prev_L, #Head, C_K), (B, prev_L, #Head, C_V)
# or (prev_L, B, #Head, C_K), (prev_L, B, #Head, C_V)
prev_key, prev_value = states
if mem_attn_mask is None:
mem_attn_mask = gen_mem_attn_mask(mem, mem_valid_length, data, None,
dtype=self._dtype, layout=self.layout)
# 1. Get the causal self-attention value, we need to attend to both the current data
# and the previous stored key/values
# Shape (B, 1, 3 * num_heads * C_key)
# or (1, B, 3 * num_heads * C_key)
step_qkv = self.attn_in_qkv(data)
step_query, step_key, step_value = np.split(step_qkv, 3, axis=-1)
step_query = npx.reshape(step_query, (-2, -2, self._num_heads, -1))
step_key = npx.reshape(step_key, (-2, -2, self._num_heads, -1))
step_value = npx.reshape(step_value, (-2, -2, self._num_heads, -1))
new_key = np.concatenate([prev_key, step_key], axis=time_axis)
new_value = np.concatenate([prev_value, step_value], axis=time_axis)
out, [_, attn_weight] = self.self_attention(step_query, new_key, new_value, None)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
if self._pre_norm:
data = self.ln_inter(data)
out, _ = self.inter_attention(npx.reshape(self.attn_inter_q(data),
(-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_k(mem),
(-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_v(mem),
(-2, -2, self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
out = npx.reshape(out, (-5, -1))
return out, (new_key, new_value)
|
Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, containing
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
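The core of incremental_decode above is growing the cached key/value along the time axis by one entry per step, starting from the zero-length tensors returned by init_states. A NumPy sketch of that cache update for layout='NT', with hypothetical sizes and without the MultiHeadAttentionCell call:
import numpy as np

batch_size, num_heads, head_dim = 2, 4, 16
prev_key = np.zeros((batch_size, 0, num_heads, head_dim))    # from init_states(): empty time axis
prev_value = np.zeros((batch_size, 0, num_heads, head_dim))

for step in range(3):
    # One decoding step produces a single new key/value per head.
    step_key = np.random.randn(batch_size, 1, num_heads, head_dim)
    step_value = np.random.randn(batch_size, 1, num_heads, head_dim)
    prev_key = np.concatenate([prev_key, step_key], axis=1)      # time axis = 1 for 'NT'
    prev_value = np.concatenate([prev_value, step_value], axis=1)

print(prev_key.shape)   # (2, 3, 4, 16): prev_seq_length grows by one each step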
def forward(self, data, valid_length, mem_data, mem_valid_length):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
self_causal_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
attn_type='causal',
layout=self._layout)
mem_attn_mask = gen_mem_attn_mask(mem_data, mem_valid_length, data, valid_length,
dtype=self._dtype,
layout=self._layout)
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out = layer(out, mem_data, self_causal_mask, mem_attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out
|
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def init_states(self, batch_size, ctx, dtype='float32'):
"""Initialize the states required for incremental decoding
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
states.append(layer.init_states(batch_size=batch_size,
ctx=ctx,
dtype=dtype))
return states
|
Initialize the states required for incremental decoding
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def incremental_decode(self, data, states, mem, mem_valid_length):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, containing a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, containing a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
time_axis = 0 if self.layout == 'TN' else 1
# Generate the mem_attn_mask
time_steps = npx.arange_like(mem, axis=time_axis) # (mem_length,)
mem_attn_mask = np.reshape(time_steps, (1, 1, -1))\
< np.reshape(mem_valid_length, (-1, 1, 1))
# TODO(sxjscience) Try with boolean masking
mem_attn_mask = mem_attn_mask.astype(self._dtype)
new_states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, new_state = layer.incremental_decode(out, states[i],
mem, mem_valid_length, mem_attn_mask)
new_states.append(new_state)
if self._pre_norm:
out = self.ln_final(out)
return out, new_states
|
Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, containing a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, containing a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def __init__(self, src_vocab_size: int,
tgt_vocab_size: int,
max_src_length: Optional[int] = None,
max_tgt_length: Optional[int] = None,
scale_embed: bool = True,
pos_embed_type="sinusoidal",
shared_embed: bool = True,
tie_weights: bool = True,
activation_dropout: float = 0.0,
dropout: float = 0.1,
attention_dropout: float = 0.1,
layer_norm_eps: float = 1E-5,
data_norm: bool = False,
enc_units: int = 512,
enc_hidden_size: int = 2048,
enc_num_heads: int = 8,
enc_num_layers: int = 6,
enc_recurrent: bool = False,
enc_activation='relu',
enc_pre_norm: bool = False,
enc_use_qkv_bias: bool = True,
dec_units: int = 512,
dec_hidden_size: int = 2048,
dec_num_heads: int = 8,
dec_num_layers: int = 6,
dec_recurrent: bool = False,
dec_activation='relu',
dec_pre_norm: bool = False,
dec_use_qkv_bias: bool = True,
embed_initializer=mx.init.Xavier('gaussian', 'in', 1),
weight_initializer=mx.init.Xavier('uniform', 'avg', 3),
bias_initializer='zeros',
dtype='float32',
layout='NT'):
"""
Parameters
----------
src_vocab_size
The vocabulary size of the source language
tgt_vocab_size
The vocabulary size of the target language
max_src_length
The maximal length of the source sequence.
If it's negative, we will treat it as not set.
max_tgt_length
The maximal length of the target sequence.
If it's negative, we will treat it as not set.
scale_embed
Whether to multiply the src and dst embeddings by sqrt(units)
pos_embed_type
Type of the positional embedding
shared_embed
Whether to share the embedding of the src and tgt language
tie_weights
Whether to tie the input and output embedding weights.
activation_dropout
The ratio of the activation dropout in FFN
dropout
The default dropout ratio
attention_dropout
The ratio of the attention dropout
layer_norm_eps
The epsilon of the layer normalization
data_norm
Whether to add layer normalization layer after the input.
enc_units
Units of the encoder
enc_hidden_size
Hidden size of the encoder
enc_num_heads
Number of heads of the encoder
enc_num_layers
Number of layers of the encoder
enc_recurrent
Whether to use recurrent encoder (share weights)
enc_activation
Activation of the encoder layer
enc_pre_norm
Whether to add layer_norm before self-attention in the encoder
enc_use_qkv_bias
Whether to use bias for the attention layers in the encoder
dec_units
Units of the decoder
dec_hidden_size
Hidden size of the decoder
dec_num_heads
Number of heads of the decoder
dec_num_layers
Number of layers of the decoder
dec_recurrent
Whether to use recurrent decoder (share weights)
dec_activation
Activation of the decoder layer
dec_pre_norm
Whether to add layer_norm before self-attention in the decoder
dec_use_qkv_bias
Whether to use bias for the attention layers in the decoder
embed_initializer
Initializer of the embedding layer
weight_initializer
Initializer of the weight
bias_initializer
Initializer of the bias
dtype
Data type of the weights
layout
The layout of the input + target
"""
super().__init__()
assert src_vocab_size > 0 and tgt_vocab_size > 0,\
'Cannot set "src_vocab_size" and "tgt_vocab_size" to negative numbers. ' \
'Are you creating ' \
'the model with the config from TransformerModel.get_cfg()? If that is ' \
'the case, you will need to set the cfg.MODEL.src_vocab_size and ' \
'cfg.MODEL.tgt_vocab_size manually before passing to ' \
'TransformerModel.from_cfg().'
self._dtype = dtype
self._src_vocab_size = src_vocab_size
self._tgt_vocab_size = tgt_vocab_size
self.tie_weights = tie_weights
self.pos_embed_type = pos_embed_type
self.scaled_embed = scale_embed
self.enc_units = enc_units
self.dec_units = dec_units
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
if max_src_length is not None and max_src_length < 0:
max_src_length = None
if max_tgt_length is not None and max_tgt_length < 0:
max_tgt_length = None
if enc_units != dec_units:
assert shared_embed is False, 'Cannot share embedding when the enc_units and dec_units ' \
'are different! enc_units={},' \
' dec_units={}'.format(enc_units, dec_units)
self.src_embed_layer = nn.Embedding(input_dim=src_vocab_size,
output_dim=enc_units,
weight_initializer=embed_initializer,
dtype=self._dtype)
self.tgt_embed_layer = nn.Embedding(input_dim=tgt_vocab_size,
output_dim=dec_units,
weight_initializer=embed_initializer,
dtype=self._dtype)
if shared_embed:
self.tgt_embed_layer.weight = self.src_embed_layer.weight
if pos_embed_type is not None:
self.src_pos_embed_layer = PositionalEmbedding(units=enc_units,
max_length=max_src_length,
dtype=self._dtype,
method=pos_embed_type)
self.tgt_pos_embed_layer = PositionalEmbedding(units=dec_units,
max_length=max_tgt_length,
dtype=self._dtype,
method=pos_embed_type)
self.encoder = TransformerEncoder(num_layers=enc_num_layers,
recurrent=enc_recurrent,
units=enc_units,
hidden_size=enc_hidden_size,
num_heads=enc_num_heads,
activation_dropout=activation_dropout,
use_qkv_bias=enc_use_qkv_bias,
dropout=dropout,
attention_dropout=attention_dropout,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=enc_activation,
data_norm=data_norm,
pre_norm=enc_pre_norm,
dtype=self._dtype,
layout=layout)
self.decoder = TransformerDecoder(num_layers=dec_num_layers,
recurrent=dec_recurrent,
units=dec_units,
mem_units=enc_units,
hidden_size=dec_hidden_size,
num_heads=dec_num_heads,
activation_dropout=activation_dropout,
use_qkv_bias=dec_use_qkv_bias,
dropout=dropout,
attention_dropout=attention_dropout,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=dec_activation,
data_norm=data_norm,
pre_norm=dec_pre_norm,
dtype=self._dtype,
layout=layout)
if tie_weights:
self.tgt_final_layer = \
nn.Dense(units=tgt_vocab_size,
flatten=False,
in_units=self.dec_units,
bias_initializer=bias_initializer,
use_bias=False,
dtype=self._dtype)
self.tgt_final_layer.weight = self.tgt_embed_layer.weight
else:
self.tgt_final_layer = \
nn.Dense(tgt_vocab_size,
flatten=False,
in_units=self.dec_units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_bias=False,
dtype=self._dtype)
|
Parameters
----------
src_vocab_size
The vocabulary size of the source language
tgt_vocab_size
The vocabulary size of the target language
max_src_length
The maximal length of the source sequence.
If it's negative, we will treat it as not set.
max_tgt_length
The maximal length of the target sequence.
If it's negative, we will treat it as not set.
scale_embed
Whether to multiply the src and dst embeddings by sqrt(units)
pos_embed_type
Type of the positional embedding
shared_embed
Whether to share the embedding of the src and tgt language
tie_weights
Whether to tie the input and output embedding weights.
activation_dropout
The ratio of the activation dropout in FFN
dropout
The default dropout ratio
attention_dropout
The ratio of the attention dropout
layer_norm_eps
The epsilon of the layer normalization
data_norm
Whether to add layer normalization layer after the input.
enc_units
Units of the encoder
enc_hidden_size
Hidden size of the encoder
enc_num_heads
Number of heads of the encoder
enc_num_layers
Number of layers of the encoder
enc_recurrent
Whether to use recurrent encoder (share weights)
enc_activation
Activation of the encoder layer
enc_pre_norm
Whether to add layer_norm before self-attention in the encoder
enc_use_qkv_bias
Whether to use bias for the attention layers in the encoder
dec_units
Units of the decoder
dec_hidden_size
Hidden size of the decoder
dec_num_heads
Number of heads of the decoder
dec_num_layers
Number of layers of the decoder
dec_recurrent
Whether to use recurrent decoder (share weights)
dec_activation
Activation of the decoder layer
dec_pre_norm
Whether to add layer_norm before self-attention in the decoder
dec_use_qkv_bias
Whether to use bias for the attention layers in the decoder
embed_initializer
Initializer of the embedding layer
weight_initializer
Initializer of the weight
bias_initializer
Initializer of the bias
dtype
Data type of the weights
layout
The layout of the input + target
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
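As the assertion at the top of the constructor notes, a model built from the default config needs src_vocab_size and tgt_vocab_size filled in first. A hypothetical construction sketch, assuming TransformerModel is importable from gluonnlp.models.transformer (the path shown in this record), that get_cfg()/from_cfg() follow the pattern named in that assertion, and that the config supports defrost()/freeze(); the vocabulary sizes are placeholders.
import mxnet as mx
from gluonnlp.models.transformer import TransformerModel   # module path shown above

cfg = TransformerModel.get_cfg()            # named in the assertion message above
cfg.defrost()
cfg.MODEL.src_vocab_size = 32000            # hypothetical vocabulary sizes
cfg.MODEL.tgt_vocab_size = 32000
cfg.freeze()
model = TransformerModel.from_cfg(cfg)      # also named in the assertion message
model.initialize(ctx=mx.cpu())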
def encode(self, src_data, src_valid_length):
"""Encode the source data to memory
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
"""
src_data = self.src_embed_layer(src_data)
if self.scaled_embed:
src_data = src_data * _np.sqrt(self.enc_units)
if self.pos_embed_type is not None:
if self.layout == 'NT':
src_data = src_data + self.src_pos_embed_layer(npx.arange_like(src_data, axis=1))
else:
src_data = src_data + np.expand_dims(self.src_pos_embed_layer(
npx.arange_like(src_data, axis=0)), axis=1)
enc_out = self.encoder(src_data, src_valid_length)
return enc_out
|
Encode the source data to memory
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
|
encode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
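encode() above scales the token embeddings by sqrt(enc_units) and adds a positional embedding indexed by the position ids. A NumPy sketch of the standard sinusoidal table this corresponds to; this is my construction of a 'sinusoidal' table, not the PositionalEmbedding class itself, and implementations differ in how they interleave sin and cos:
import numpy as np

def sinusoidal_pos_embed(seq_length, units):
    # Standard sin/cos positional table of shape (seq_length, units), units assumed even.
    positions = np.arange(seq_length)[:, None]                       # (T, 1)
    div = np.power(10000.0, np.arange(0, units, 2) / units)          # (units/2,)
    table = np.zeros((seq_length, units))
    table[:, 0::2] = np.sin(positions / div)
    table[:, 1::2] = np.cos(positions / div)
    return table

batch_size, seq_length, units = 2, 6, 8
src_embed = np.random.randn(batch_size, seq_length, units)           # token embeddings, layout='NT'
src_data = src_embed * np.sqrt(units) + sinusoidal_pos_embed(seq_length, units)  # broadcast over batch
print(src_data.shape)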
def decode_seq(self, tgt_data, tgt_valid_length, mem_data, mem_valid_length):
"""Decode a sequence of inputs
Parameters
----------
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
mem_valid_length :
Shape (batch_size,)
Returns
-------
dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
"""
tgt_data = self.tgt_embed_layer(tgt_data)
if self.scaled_embed:
tgt_data = tgt_data * _np.sqrt(self.dec_units)
if self.pos_embed_type is not None:
if self.layout == 'NT':
tgt_data = tgt_data + self.tgt_pos_embed_layer(
npx.arange_like(tgt_data, axis=1))
else:
tgt_data = tgt_data + np.expand_dims(self.tgt_pos_embed_layer(
npx.arange_like(tgt_data, axis=0)), axis=1)
dec_out = self.decoder(tgt_data, tgt_valid_length, mem_data, mem_valid_length)
return dec_out
|
Decode a sequence of inputs
Parameters
----------
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
mem_valid_length :
Shape (batch_size,)
Returns
-------
dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
|
decode_seq
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length):
"""
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
"""
enc_out = self.encode(src_data, src_valid_length)
dec_out = self.decode_seq(tgt_data, tgt_valid_length, enc_out, src_valid_length)
dec_out = self.tgt_final_layer(dec_out)
return dec_out
|
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def state_batch_axis(self) -> Tuple[int, int, int, List]:
"""Return a data structure that stores the batch axis of the internal states
of the inference model.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axis
"""
if self.model.layout == 'NT':
return 0, 0, 0, self.model.decoder.state_batch_axis
else:
return 1, 0, 0, self.model.decoder.state_batch_axis
|
Return a data structure that stores the batch axis of the internal states
of the inference model.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axis
|
state_batch_axis
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
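state_batch_axis exists so a search routine can gather or reorder every state tensor along its own batch dimension, for example when beams are reshuffled. A NumPy sketch of that gather, using a hypothetical flat list of (state, batch_axis) pairs instead of the nested structure returned above:
import numpy as np

def reorder_states(states_and_axes, index):
    # Gather every state along its own batch axis using `index` (e.g. beam backpointers).
    return [np.take(state, index, axis=batch_axis) for state, batch_axis in states_and_axes]

enc_out = np.random.randn(4, 7, 16)           # (batch_size, src_length, C_mem), batch axis 0 for 'NT'
src_valid_length = np.array([7, 5, 6, 7])     # batch axis 0
index = np.array([2, 2, 0, 3])                # hypothetical reordering of the batch
new_states = reorder_states([(enc_out, 0), (src_valid_length, 0)], index)
print(new_states[0].shape, new_states[1])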
def init_states(self, src_data, src_valid_length): # TODO(sxjscience) Revisit here, support auxiliary states?
"""Initialize the states required for incremental decoding
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_mem)
- layout = 'TN'
Shape (src_length, batch_size, C_mem)
src_valid_length
Shape (batch_size,)
position
Shape (batch_size,)
dec_states: list
The states of the decoder
"""
if self.model.layout == 'NT':
batch_size = src_data.shape[0]
else:
batch_size = src_data.shape[1]
ctx = src_data.ctx
enc_out = self.model.encode(src_data, src_valid_length)
position = mx.np.zeros((batch_size,), dtype=np.int32, ctx=ctx)
dtype = enc_out.dtype
dec_states = self.model.decoder.init_states(batch_size, ctx, dtype)
return enc_out, src_valid_length, position, dec_states
|
Initialize the states required for incremental decoding
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_mem)
- layout = 'TN'
Shape (src_length, batch_size, C_mem)
src_valid_length
Shape (batch_size,)
position
Shape (batch_size,)
dec_states: list
The states of the decoder
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def forward(self, step_data, states):
"""
Parameters
----------
step_data
Shape (batch_size,)
states
It includes :
- layout = 'NT'
- mem_data : (batch_size, src_length, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
- layout = 'TN'
- mem_data : (src_length, batch_size, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
Returns
-------
out
Shape (batch_size, C)
new_states
Has the same structure as the states
"""
mem_data, mem_valid_length, position, dec_states = states
# 1. Get the embedding
step_data = self.model.tgt_embed_layer(step_data)
if self.model.scaled_embed:
step_data = step_data * _np.sqrt(self.model.dec_units)
if self.model.pos_embed_type is not None:
step_data = step_data + self.model.tgt_pos_embed_layer(position)
out, new_states =\
self.model.decoder.incremental_decode(step_data, dec_states,
mem_data, mem_valid_length)
out = self.model.tgt_final_layer(out)
return out, (mem_data, mem_valid_length, position + 1, new_states)
|
Parameters
----------
step_data
Shape (batch_size,)
states
It includes :
- layout = 'NT'
- mem_data : (batch_size, src_length, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
- layout = 'TN'
- mem_data : (src_length, batch_size, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
Returns
-------
out
Shape (batch_size, C)
new_states
Has the same structure as the states
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py
|
Apache-2.0
|
def forward(self, data, mem, rel_positions, mask, query_r_bias, query_k_bias):
"""
Parameters
----------
data
The input data.
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
mem
The memory.
- layout = 'NT'
Shape (batch_size, mem_length, units)
- layout = 'TN'
Shape (mem_length, batch_size, units)
rel_positions
The relative positions between data and concat(mem, data).
Shape is (query_length, mem_length + query_length).
A positive value means that query is after the memory, i.e.,
query_location - mem_location.
mask
Mask between the query and the memory + query.
1 --> will be used, 0 --> won't be used
Shape (batch_size, query_length, mem_length + query_length)
query_r_bias
The query bias for calculating the relative scores
Shape (num_heads, query_head_units)
query_k_bias
The key bias for calculating the relative scores.
Shape (num_heads, query_head_units)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
"""
if self._layout == 'NT':
context = np.concatenate([mem, data], axis=1)
elif self._layout == 'TN':
context = np.concatenate([mem, data], axis=0)
else:
raise NotImplementedError
if self._pre_norm:
query = self.attn_query(self.layer_norm(data))
key_value = self.attn_kv(self.layer_norm(context))
key, value = np.split(key_value, 2, axis=-1)
else:
query = self.attn_query(data)
key_value = self.attn_kv(context)
key, value = np.split(key_value, 2, axis=-1)
query = npx.reshape(query, (-2, -2, self._num_heads, -1))
key = npx.reshape(key, (-2, -2, self._num_heads, -1))
value = npx.reshape(value, (-2, -2, self._num_heads, -1))
# Compute attention
rel_score = self.rel_pos_score_cell(rel_positions, query + query_r_bias)
out, _ = self.attn_cell(query + query_k_bias, key, value, mask, rel_score)
out = self.dropout_layer(out)
if self._pre_norm:
out = data + out
else:
out = self.layer_norm(data + out)
out = self.ffn(out)
return out
|
Parameters
----------
data
The input data.
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
mem
The memory.
- layout = 'NT'
Shape (batch_size, mem_length, units)
- layout = 'TN'
Shape (mem_length, batch_size, units)
rel_positions
The relative positions between data and concat(mem, data).
Shape is (query_length, mem_length + query_length).
A positive value means that query is after the memory, i.e.,
query_location - mem_location.
mask
Mask between the query and the memory + query.
1 --> will be used, 0 --> won't be used
Shape (batch_size, query_length, mem_length + query_length)
query_r_bias
The query bias for calculating the relative scores
Shape (num_heads, query_head_units)
query_k_bias
The key bias for calculating the relative scores.
Shape (num_heads, query_head_units)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|
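The rel_positions argument above encodes query_location - mem_location for every (query, memory + query) pair, so entries are positive when the query comes after the position it attends to. A NumPy sketch of that matrix:
import numpy as np

def relative_positions(query_length, mem_length):
    # rel[i, j] = (mem_length + i) - j for query i and context position j.
    query_pos = mem_length + np.arange(query_length)     # absolute positions of the new queries
    ctx_pos = np.arange(mem_length + query_length)       # memory followed by the new queries
    return query_pos[:, None] - ctx_pos[None, :]         # (query_length, mem_length + query_length)

print(relative_positions(3, 4))
# Row i holds the signed distance from query i to every cached/new position:
# positive toward earlier positions, zero on its own slot, negative toward future queries.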
def forward(self, data, mem_l, rel_positions, mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, query_length)
- layout = 'TN'
Shape (query_length, batch_size)
mem_l
Contains a list of memory objects, each one will contain:
- layout = 'NT'
Shape (batch_size, mem_length, C_i)
- layout = 'TN'
Shape (mem_length, batch_size, C_i)
rel_positions
The relative positions.
Shape (query_length, mem_length + query_length)
mask
Mask between the query and the memory + query.
Shape (batch_size, query_length, mem_length + query_length)
Returns
-------
out_l
Contains a list of hidden states, each will contain:
- layout = 'NT'
Shape (batch_size, query_length, C_o)
- layout = 'TN'
Shape (query_length, batch_size, C_o)
"""
query_k_bias = self.query_k_bias.data()
query_r_bias = self.query_r_bias.data()
out_l = []
out = data
for i, layer in enumerate(self.decoder_layers):
out = layer(out, mem_l[i], rel_positions, mask, query_r_bias, query_k_bias)
out_l.append(out)
return out_l
|
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, query_length)
- layout = 'TN'
Shape (query_length, batch_size)
mem_l
Contains a list of memory objects, each one will contain:
- layout = 'NT'
Shape (batch_size, mem_length, C_i)
- layout = 'TN'
Shape (mem_length, batch_size, C_i)
rel_positions
The relative positions.
Shape (query_length, mem_length + query_length)
mask
Mask between the query and the memory + query.
Shape (batch_size, query_length, mem_length + query_length)
Returns
-------
out_l
Contains a list of hidden states, each will contain:
- layout = 'NT'
Shape (batch_size, query_length, C_o)
- layout = 'TN'
Shape (query_length, batch_size, C_o)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|
def init_states(self, batch_size, ctx):
"""Initialize the states
Parameters
----------
batch_size
ctx
The ctx on which the memory states are initialized
Returns
-------
mems
A list of memory states
- layout = 'NT'
Shape (B, T, C)
- layout = 'TN'
Shape (T, B, C)
"""
if self._layout == 'NT':
return [mx.np.zeros((batch_size, 0, self._units), ctx=ctx)
for _ in range(self._num_layers)]
elif self._layout == 'TN':
return [mx.np.zeros((0, batch_size, self._units), ctx=ctx)
for _ in range(self._num_layers)]
else:
raise NotImplementedError
|
Initialize the states
Parameters
----------
batch_size
ctx
The ctx on which the memory states are initialized
Returns
-------
mems
A list of memory states
- layout = 'NT'
Shape (B, T, C)
- layout = 'TN'
Shape (T, B, C)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|