| code (string, 66-870k chars) | docstring (string, 19-26.7k chars) | func_name (string, 1-138 chars) | language (string, 1 class) | repo (string, 7-68 chars) | path (string, 5-324 chars) | url (string, 46-389 chars) | license (string, 7 classes) |
|---|---|---|---|---|---|---|---|
def set_mem_length(self, mem_length: int):
"""
Parameters
----------
mem_length
The memory length of the model
"""
self._cfg.defrost()
self._cfg.MODEL.mem_length = mem_length
self._cfg.freeze()
|
Parameters
----------
mem_length
The memory length of the model
|
set_mem_length
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|
def forward(self, data, target, mem_l, rel_positions=None, data_mem_mask=None,
causal_only=False, detach_memory=True):
"""
Parameters
----------
data
The input data
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
target
The ground truth
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
rel_positions
Shape (query_length, mem_length + query_length)
By default, we will use the following relative positions
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'in': 5, 4, 3, 2, 1, 0, -1, -2
'Gluon@@': 6, 5, 4, 3, 2, 1, 0, -1
'NLP': 7, 6, 5, 4, 3, 2, 1, 0
data_mem_mask
Shape (B, query_length, mem_length + query_length)
Here, 1 --> will be used, 0 --> won't be used.
By default, we will mask all locations whose distance from the current token
is greater than mem_length.
Following is an example in which query_length = 4, mem_length = 4
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 0, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 0, 0, 1, 1, 1, 1, 1, 0
'NLP': 0, 0, 0, 1, 1, 1, 1, 1
We also provide the option to only mask the future tokens; this is
enabled by setting `causal_only` to True. However, there will be a
discrepancy between training and inference because the effective memory length is
longer for the later tokens in the query.
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
causal_only
Whether to ignore the local masking constraint and only mask future tokens. See the description of `data_mem_mask` above for more information.
detach_memory
Whether to detach the encoded memory from the graph.
Returns
-------
logits
The selected logits
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
new_mem_l
A list of the updated memory
- layout = 'NT'
Each will have shape (B, T, C)
- layout = 'TN'
Each will have shape (T, B, C)
"""
# Note that curr_mem_length will not necessarily be equal to mem_length
if self._layout == 'NT':
time_axis = 1
batch_axis = 0
elif self._layout == 'TN':
time_axis = 0
batch_axis = 1
else:
raise NotImplementedError
query_length = data.shape[time_axis]
curr_mem_length = mem_l[0].shape[time_axis]
batch_size = mem_l[0].shape[batch_axis]
ctx = data.ctx
local_attn_mask = mx.np.ones((batch_size, query_length, curr_mem_length + query_length),
dtype=np.int32, ctx=ctx)
if not causal_only:
# Generate the mask, we mask out the input outside the local self.mem_length window
local_attn_mask = mx.np.triu(mx.np.tril(local_attn_mask, curr_mem_length),
curr_mem_length - self.mem_length)
else:
local_attn_mask = mx.np.tril(local_attn_mask, curr_mem_length)
if data_mem_mask is None:
data_mem_mask = local_attn_mask
else:
data_mem_mask = data_mem_mask * local_attn_mask
if rel_positions is None:
query_ids = mx.np.arange(curr_mem_length, curr_mem_length + query_length,
dtype=np.int32, ctx=ctx)
mem_ids = mx.np.arange(0, curr_mem_length + query_length,
dtype=np.int32, ctx=ctx)
rel_positions = mx.np.expand_dims(query_ids, axis=1)\
- mx.np.expand_dims(mem_ids, axis=0)
# Get word embeddings
word_embeddings = self.word_emb(data)
word_embeddings = self.dropout_layer(word_embeddings)
out_l = self.decoder(word_embeddings, mem_l, rel_positions, data_mem_mask)
# Get the output logits
logits = self.crit(out_l[-1], target)
# Get the new memory
new_mem_l = []
for step_out, mem in zip([word_embeddings] + out_l, mem_l):
new_mem = mx.np.concatenate([mem, step_out], axis=time_axis)
if self._layout == 'NT':
new_mem = new_mem[:, -self.mem_length:]
elif self._layout == 'TN':
new_mem = new_mem[-self.mem_length:, :]
else:
raise NotImplementedError
if detach_memory:
new_mem_l.append(new_mem.detach())
else:
new_mem_l.append(new_mem)
return logits, new_mem_l
|
Parameters
----------
data
The input data
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
target
The ground truth
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
rel_positions
Shape (query_length, mem_length + query_length)
By default, we will use the following relative positions
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'in': 5, 4, 3, 2, 1, 0, -1, -2
'Gluon@@': 6, 5, 4, 3, 2, 1, 0, -1
'NLP': 7, 6, 5, 4, 3, 2, 1, 0
data_mem_mask
Shape (B, query_length, mem_length + query_length)
Here, 1 --> will be used, 0 --> won't be used.
By default, we will mask all locations whose distance from the current token
is greater than mem_length.
Following is an example in which query_length = 4, mem_length = 4
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 0, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 0, 0, 1, 1, 1, 1, 1, 0
'NLP': 0, 0, 0, 1, 1, 1, 1, 1
We also provide the option to only mask the future tokens; this is
enabled by setting `causal_only` to True. However, there will be a
discrepancy between training and inference because the effective memory length is
longer for the later tokens in the query.
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
causal_only
Whether to ignore the local masking constraint and only mask future tokens. See the description of `data_mem_mask` above for more information.
detach_memory
Whether to detach the encoded memory from the graph.
Returns
-------
logits
The selected logits
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
new_mem_l
A list of the updated memory
- layout = 'NT'
Each will have shape (B, T, C)
- layout = 'TN'
Each will have shape (T, B, C)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|
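A minimal NumPy sketch (illustrative only, not part of the model class; the variable names are mine) of how the default rel_positions and the local attention mask described in the forward docstring above can be reproduced for query_length = 4 and mem_length = curr_mem_length = 4:

import numpy as np

query_length, curr_mem_length, mem_length = 4, 4, 4
# Default relative positions: query position id minus key position id.
query_ids = np.arange(curr_mem_length, curr_mem_length + query_length)
mem_ids = np.arange(0, curr_mem_length + query_length)
rel_positions = query_ids[:, None] - mem_ids[None, :]
# Local mask: keep keys that are at most mem_length steps behind the query and never ahead.
ones = np.ones((query_length, curr_mem_length + query_length), dtype=np.int32)
local_mask = np.triu(np.tril(ones, curr_mem_length), curr_mem_length - mem_length)
print(rel_positions[1])   # [ 5  4  3  2  1  0 -1 -2] -> the 'in' row of the example
print(local_mask)         # reproduces the query_length = 4, mem_length = 4 table above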
def step_forward(self, step_data, mem_l):
"""Forward for just one step
Parameters
----------
step_data
Shape (B,)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
Returns
-------
logits
Shape (B, V)
new_mem_l
A list of memory objects
- layout = 'NT'
Shape (B, min(T_mem + 1, memory_length), C)
- layout = 'TN'
Shape (min(T_mem + 1, memory_length), B, C)
"""
batch_size = step_data.shape[0]
if self._layout == 'NT':
curr_mem_length = mem_l[0].shape[1]
elif self._layout == 'TN':
curr_mem_length = mem_l[0].shape[0]
else:
raise NotImplementedError
ctx = step_data.ctx
mask = mx.np.ones((batch_size, 1, curr_mem_length + 1), dtype=np.int32, ctx=ctx)
rel_positions = mx.np.expand_dims(mx.np.arange(curr_mem_length, -1, -1, dtype=np.int32,
ctx=ctx), axis=0)
# Word embedding shape = (B, C)
word_embeddings = self.dropout_layer(self.word_emb(step_data))
if self._layout == 'NT':
word_embeddings = mx.np.expand_dims(word_embeddings, axis=1)
elif self._layout == 'TN':
word_embeddings = mx.np.expand_dims(word_embeddings, axis=0)
else:
raise NotImplementedError
out_l = self.decoder(word_embeddings, mem_l, rel_positions, mask)
# Get logits
if self._layout == 'NT':
final_out = out_l[-1][:, 0]
elif self._layout == 'TN':
final_out = out_l[-1][0, :]
else:
raise NotImplementedError
logits = self.crit.get_logits(final_out)
# Update memory
new_mem_l = []
for step_out, mem in zip([word_embeddings] + out_l, mem_l):
if self._layout == 'NT':
new_mem = mx.np.concatenate([mem, step_out], axis=1)
new_mem = new_mem[:, -self.mem_length:]
elif self._layout == 'TN':
new_mem = mx.np.concatenate([mem, step_out], axis=0)
new_mem = new_mem[-self.mem_length:, :]
else:
raise NotImplementedError
new_mem_l.append(new_mem)
return logits, new_mem_l
|
Forward for just one step
Parameters
----------
step_data
Shape (B,)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
Returns
-------
logits
Shape (B, V)
new_mem_l
A list of memory objects
- layout = 'NT'
Shape (B, min(T_mem + 1, memory_length), C)
- layout = 'TN'
Shape (min(T_mem + 1, memory_length), B, C)
|
step_forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/transformer_xl.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py
|
Apache-2.0
|
def get_pretrained_xlmr(model_name: str = 'fairseq_xlmr_base',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False) \
-> Tuple[CN, SentencepieceTokenizer, str, str]:
"""Get the pretrained XLM-R weights
Parameters
----------
model_name
The name of the xlmr model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameters that include both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_xlmr())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
sp_model_path = PRETRAINED_URL[model_name]['sentencepiece.model']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('sentencepiece.model', sp_model_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = SentencepieceTokenizer(
model_path=local_paths['sentencepiece.model'],
lowercase=do_lower)
if cfg is None:
cfg = XLMRModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path
|
Get the pretrained XLM-R weights
Parameters
----------
model_name
The name of the xlmr model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameters that include both the backbone and the MLM
|
get_pretrained_xlmr
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/models/xlmr.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/xlmr.py
|
Apache-2.0
|
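A hedged usage sketch for get_pretrained_xlmr, assuming the module path matches the file path above (gluonnlp.models.xlmr) and the model-zoo files are reachable; only arguments shown in the signature are used:

from gluonnlp.models.xlmr import get_pretrained_xlmr

# Download the config, sentencepiece model and backbone weights of the base model.
cfg, tokenizer, params_path, mlm_params_path = get_pretrained_xlmr(
    model_name='fairseq_xlmr_base', load_backbone=True, load_mlm=False)
print(params_path)        # local path of the downloaded backbone parameters
print(mlm_params_path)    # None, since load_mlm=False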
def gen_self_attn_mask(data,
valid_length=None,
attn_type: str = 'full',
layout: str = 'NT'):
"""Generate the mask used for the encoder, i.e, self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
"""
device = data.device
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
if attn_type == 'full':
if valid_length is not None:
steps = th.arange(data.shape[time_axis], device=device) # (seq_length,)
mask1 = (steps.view((1, 1, -1))
< valid_length.view((valid_length.shape[0], 1, 1)))
mask2 = (steps.view((1, -1, 1))
< valid_length.view((valid_length.shape[0], 1, 1)))
mask = mask1 * mask2
else:
seq_len_ones = th.ones((data.shape[time_axis],), device=device) # (seq_length,)
batch_ones = th.ones((data.shape[batch_axis],), device=device) # (batch_size,)
mask = batch_ones.view((-1, 1, 1)) * seq_len_ones.view((1, -1, 1))\
* seq_len_ones.view((1, 1, -1))
elif attn_type == 'causal':
steps = th.arange(data.shape[time_axis], device=device)
# mask: (seq_length, seq_length)
# batch_mask: (batch_size, seq_length)
mask = th.unsqueeze(steps, dim=0) <= th.unsqueeze(steps, dim=1)
if valid_length is not None:
batch_mask = th.unsqueeze(steps, dim=0) < th.unsqueeze(valid_length, dim=-1)
mask = mask * th.unsqueeze(batch_mask, dim=-1)
else:
batch_ones = th.ones(data.shape[batch_axis], device=device)
mask = mask * batch_ones.view((-1, 1, 1))
else:
raise NotImplementedError
return mask.type(th.bool)
|
Generate the mask used for the encoder, i.e., self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
|
gen_self_attn_mask
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/attention_cell.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
|
Apache-2.0
|
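The causal-with-padding mask tabulated in the docstring above can be reproduced with a few lines of plain torch; this is only an illustrative sketch, not a replacement for gen_self_attn_mask:

import torch as th

seq_length = 8
valid_length = th.tensor([8, 6])
steps = th.arange(seq_length)
# Causal part: query position i may attend to key positions j <= i.
causal = steps[None, :] <= steps[:, None]                    # (seq_length, seq_length)
# Padding part: query positions at or beyond valid_length are fully masked.
batch_mask = steps[None, :] < valid_length[:, None]          # (batch_size, seq_length)
mask = causal[None, :, :] & batch_mask[:, :, None]           # (batch_size, seq_length, seq_length)
print(mask[1].int())   # second sample: the two '<PAD>' rows are all zeros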
def gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length=None,
layout: str = 'NT'):
"""Generate the mask used for the decoder. All query slots are attended to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
['be', 'with', 'you', '<PAD>']
'May': 1, 1, 1, 0
'the': 1, 1, 1, 0
'force': 1, 1, 1, 0
'<PAD>': 0, 0, 0, 0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
layout
Layout of the data + mem tensor
Returns
-------
mask
Shape (batch_size, query_length, mem_length)
"""
device = mem.device
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
batch_size = mem.shape[batch_axis]
mem_length = mem.shape[time_axis]
query_length = data.shape[time_axis]
mem_steps = th.arange(mem_length, device=device) # (mem_length,)
data_steps = th.arange(data.shape[time_axis], device=device) # (query_length,)
# mem_mask will have shape (B, 1, mem_length)
mem_mask = mem_steps.view((1, 1, mem_length)) < mem_valid_length.view((batch_size, 1, 1))
if data_valid_length is not None:
# (B, query_length, 1)
data_mask = (data_steps.view((1, -1, 1))
< data_valid_length.view((batch_size, 1, 1)))
mask = mem_mask * data_mask
else:
mask = mem_mask.expand(batch_size, query_length, -1)
return mask.type(th.bool)
|
Generate the mask used for the decoder. All query slots attend to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
['be', 'with', 'you', '<PAD>']
'May': 1, 1, 1, 0
'the': 1, 1, 1, 0
'force': 1, 1, 1, 0
'<PAD>': 0, 0, 0, 0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
layout
Layout of the data + mem tensor
Returns
-------
mask
Shape (batch_size, query_length, mem_length)
|
gen_mem_attn_mask
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/attention_cell.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
|
Apache-2.0
|
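A small torch sketch that reproduces the second-sample mask from the gen_mem_attn_mask example above (mem_valid_length = 3, data_valid_length = 3); illustrative only:

import torch as th

mem_length, query_length = 4, 4
mem_valid_length = th.tensor([4, 3])
data_valid_length = th.tensor([4, 3])
mem_steps = th.arange(mem_length)
data_steps = th.arange(query_length)
mem_mask = mem_steps[None, None, :] < mem_valid_length[:, None, None]     # (B, 1, mem_length)
data_mask = data_steps[None, :, None] < data_valid_length[:, None, None]  # (B, query_length, 1)
mask = mem_mask & data_mask                                               # (B, query_length, mem_length)
print(mask[1].int())   # last memory column and last query row are zero, as in the table above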
def masked_softmax(att_score, mask, axis: int = -1):
"""Ignore the masked elements when calculating the softmax.
The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
att_weights : Symbol or NDArray
Shape (..., length, ...)
"""
if mask is not None:
# Fill in the masked scores with a very small value
if att_score.dtype == th.float16:
att_score = att_score.masked_fill(th.logical_not(mask), -1E4)
else:
att_score = att_score.masked_fill(th.logical_not(mask), -1E18)
att_weights = th.softmax(att_score, dim=axis) * mask
else:
att_weights = th.softmax(att_score, dim=axis)
return att_weights
|
Ignore the masked elements when calculating the softmax.
The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
att_weights : Symbol or NDArray
Shape (..., length, ...)
|
masked_softmax
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/attention_cell.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
|
Apache-2.0
|
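A short numeric check of the behaviour documented above: masked positions receive a very negative score, so their softmax weight underflows to ~0, and the trailing multiplication by the mask forces them to exactly 0 (illustrative only):

import torch as th

att_score = th.tensor([[2.0, 1.0, 0.5, -1.0]])
mask = th.tensor([[1, 1, 1, 0]], dtype=th.bool)
masked_score = att_score.masked_fill(th.logical_not(mask), -1e18)
att_weights = th.softmax(masked_score, dim=-1) * mask
print(att_weights)         # last entry is exactly 0
print(att_weights.sum())   # the unmasked entries still sum to 1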
def multi_head_dot_attn(query, key, value,
mask=None,
edge_scores=None,
dropout: float = 0.0,
scaled: bool = True, normalized: bool = False,
eps: float = 1E-6,
layout: str = 'NKT',
use_einsum: bool = None, *, training: bool = True):
"""Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
"""
if use_einsum is None:
use_einsum = use_einsum_optimization()
# TODO(sxjscience) Profile layout
if normalized:
query = F.normalize(query, p=2, dim=-1, eps=eps)
key = F.normalize(key, p=2, dim=-1, eps=eps)
if scaled:
scale = math.sqrt(query.shape[-1])
else:
scale = None
if layout == 'NKT':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (B, N, L_query, C_Q) X (B, N, L_mem, C_Q) --> (B, N, L_query, L_mem)
scores = th.matmul(query, th.transpose(key, -2, -1))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask, axis=-1)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, N, L_mem, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = th.einsum('bnij,bnjc->binc', attn_weights, value)
else:
context_vec = th.transpose(th.matmul(attn_weights, value), 1, 2)
context_vec = th.reshape(context_vec,
(context_vec.shape[0], context_vec.shape[1], -1))
elif layout == 'NTK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (B, L_query, N, C_Q) X (B, L_mem, N, C_Q) --> (B, N, L_query, L_mem)
if use_einsum:
scores = th.einsum('binc,bjnc->bnij', query, key)
else:
scores = th.matmul(th.transpose(query, 1, 2), key.permute(0, 2, 3, 1))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, L_mem, N, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = th.einsum('bnij,bjnc->binc', attn_weights, value)
else:
context_vec = th.matmul(attn_weights, th.transpose(value, 1, 2)).permute(0, 2, 1, 3)
context_vec = th.reshape(context_vec, (context_vec.shape[0], context_vec.shape[1], -1))
elif layout == 'TNK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (L_query, B, N, C_Q) X (L_mem, B, N, C_Q) --> (B, N, L_query, L_mem)
# This layout structure can be implemented very efficiently because B, N are consecutive
# to each other. To have a clear picture of what's happening, we may consider the
# (i, j)th element of the output
# out[i, j, :, :] = query[:, i, j, :] X key[:, i, j, :].T, which is just one GEMM call
# We can thus implement the whole kernel via a single call of batched GEMM with stride.
if use_einsum:
scores = th.einsum('ibnc,jbnc->bnij', query, key)
else:
scores = th.matmul(query.permute(1, 2, 0, 3),
key.permute(1, 2, 3, 0))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (L_mem, B, N, C_V) --> (L_query, B, N * C_V)
# Again, we can implement it via a single call to batched GEMM with stride.
# Shape (B, N, L_query, C_V)
if use_einsum:
context_vec = th.einsum('bnij,jbnc->ibnc', attn_weights, value)
else:
context_vec = th.matmul(attn_weights,
value.permute(1, 2, 0, 3)).permute(2, 0, 1, 3)
context_vec = th.reshape(context_vec, (context_vec.shape[0], context_vec.shape[1], -1))
else:
raise NotImplementedError('layout="{}" is not supported! '
'We only support layout = "NKT", "NTK", and "TNK".'
.format(layout))
return context_vec, [scores, attn_weights]
|
Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
|
multi_head_dot_attn
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/attention_cell.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py
|
Apache-2.0
|
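For the 'NTK' layout, the einsum path and the transpose-plus-matmul fallback above compute the same attention scores; a quick equivalence check with arbitrary shapes (illustrative only):

import torch as th

B, N, L_q, L_m, C = 2, 4, 5, 7, 8
query = th.randn(B, L_q, N, C)   # layout 'NTK'
key = th.randn(B, L_m, N, C)
scores_einsum = th.einsum('binc,bjnc->bnij', query, key)
scores_matmul = th.matmul(th.transpose(query, 1, 2), key.permute(0, 2, 3, 1))
print(scores_einsum.shape)                                   # (B, N, L_q, L_m)
print(th.allclose(scores_einsum, scores_matmul, atol=1e-5))  # True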
def relative_position_bucket(relative_position, bidirectional: bool = True, num_buckets: int = 32,
max_distance: int = 128):
"""Map the relative position to buckets.
The major difference between our implementation and
that in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
is that we use 'query_i - mem_j' as the (i, j)-th location in relative_position.
Thus, a positive value means that the query slot is in a later timestamp than the memory slot.
However, in mesh transformer, it is treated as `mem_i - query_j` (reversed).
The implementation uses the first half of the bucket (num_buckets // 2) to store the
exact increments in positions and the second half of the bucket
(num_buckets - num_buckets // 2) to store the bucketing values in the logarithm order.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, we will use the first half to map the positions of the
positive shifts and the second half to map the positions of the negative shifts.
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
"""
ret = 0
if bidirectional:
assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \
'divisible by 2.'
num_buckets //= 2
ret = ret + (relative_position < 0).to(th.int32) * num_buckets
relative_position = th.abs(relative_position)
else:
# Clip all the negative values to 0
relative_position = th.clip(relative_position, min=0, max=None)
# Now, the relative_position is in the range [0, inf)
# Half of the buckets deal with the exact increments,
# i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to
# max_distance
val_if_large = max_exact + (th.log(relative_position.type(th.float32) / max_exact) /
math.log(max_distance / max_exact) *
(num_buckets - max_exact)).to(th.int32)
val_if_large = th.minimum(val_if_large, th.tensor(num_buckets - 1))
ret = ret + th.where(is_small, relative_position, val_if_large)
return ret
|
Map the relative position to buckets.
The major difference between our implementation and
that in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
is that we use 'query_i - mem_j' as the (i, j)-th location in relative_position.
Thus, a positive value means that the query slot is in a later timestamp than the memory slot.
However, in mesh transformer, it is treated as `mem_i - query_j` (reversed).
The implementation uses the first half of the bucket (num_buckets // 2) to store the
exact increments in positions and the second half of the bucket
(num_buckets - num_buckets // 2) to store the bucketing values in the logarithm order.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, we will use the first half to map the positions of the
positive shifts and the second half to map the positions of the negative shifts.
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
|
relative_position_bucket
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
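For reference, a NumPy re-derivation of the bidirectional bucketing rule described above (an illustrative sketch, not the library function; the epsilon inside the log only guards the branch that is discarded for distances below max_exact):

import numpy as np

def bucket(rel, num_buckets=32, max_distance=128):
    half = num_buckets // 2                 # one half of the buckets per sign of the shift
    ret = (rel < 0).astype(np.int32) * half
    rel = np.abs(rel)
    max_exact = half // 2                   # exact buckets: 0 .. max_exact - 1
    is_small = rel < max_exact
    val_if_large = max_exact + (np.log(rel / max_exact + 1e-9)
                                / np.log(max_distance / max_exact)
                                * (half - max_exact)).astype(np.int32)
    val_if_large = np.minimum(val_if_large, half - 1)
    return ret + np.where(is_small, rel, val_if_large)

print(bucket(np.array([0, 3, 7, 8, 20, 127, -3, -50])))   # [ 0  3  7  8 10 15 19 29]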
def get_activation(act, inplace=False):
"""
Parameters
----------
act
Name of the activation
inplace
Whether to perform inplace activation
Returns
-------
activation_layer
The activation
"""
if act is None:
return lambda x: x
if isinstance(act, str):
if act == 'leaky':
# TODO(sxjscience) Add regex matching here to parse `leaky(0.1)`
return nn.LeakyReLU(0.1, inplace=inplace)
elif act == 'identity':
return nn.Identity()
elif act == 'elu':
return nn.ELU(inplace=inplace)
elif act == 'gelu':
return nn.GELU()
elif act == 'gelu(tanh)':
return GELU_TANH()
elif act == 'relu':
return nn.ReLU()
elif act == 'sigmoid':
return nn.Sigmoid()
elif act == 'tanh':
return nn.Tanh()
elif act == 'softrelu' or act == 'softplus':
return nn.Softplus()
elif act == 'softsign':
return nn.Softsign()
else:
raise NotImplementedError('act="{}" is not supported. '
'Try to include it if you can find that in '
'https://pytorch.org/docs/stable/nn.html'.format(act))
else:
return act
|
Parameters
----------
act
Name of the activation
inplace
Whether to perform inplace activation
Returns
-------
activation_layer
The activation
|
get_activation
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
def get_norm_layer(normalization: str = 'layer_norm', axis: int = -1, epsilon: float = 1e-5,
in_channels: int = 0, **kwargs):
"""Get the normalization layer based on the provided type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm']
axis
The axis to normalize over. Currently, only the last axis (-1) is supported.
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
"""
if isinstance(normalization, str):
if normalization == 'layer_norm':
assert in_channels > 0
assert axis == -1
norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)
else:
raise NotImplementedError('normalization={} is not supported'.format(normalization))
return norm_layer
else:
raise NotImplementedError('The type of normalization must be str')
|
Get the normalization layer based on the provided type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm']
axis
The axis to normalize over. Currently, only the last axis (-1) is supported.
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
|
get_norm_layer
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
def __init__(self, units: int = 512, hidden_size: int = 2048, activation_dropout: float = 0.0,
dropout: float = 0.1, gated_proj: bool = False, activation='relu',
normalization: str = 'layer_norm', layer_norm_eps: float = 1E-5,
pre_norm: bool = False):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
self._pre_norm = pre_norm
self._gated_proj = gated_proj
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('gated_proj', gated_proj),
('pre_norm', pre_norm),
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size, bias=True)
if self._gated_proj:
self.ffn_1_gate = nn.Linear(in_features=units, out_features=hidden_size, bias=True)
self.activation = get_activation(activation)
self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units, bias=True)
self.layer_norm = get_norm_layer(normalization=normalization, in_channels=units,
epsilon=layer_norm_eps)
self.init_weights()
|
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
def forward(self, data):
"""
Parameters
----------
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
if self._gated_proj:
out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data)
else:
out = self.activation(self.ffn_1(data))
out = self.activation_dropout_layer(out)
out = self.ffn_2(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
return out
|
Parameters
----------
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
def __init__(self, units: int, learnable=False):
"""Use a geometric sequence of timescales.
It is calculated as
[sin(w_0 x), sin(w_1 x), ..., cos(w_0 x), cos(w_1 x), ...]
By default, we initialize w_i to be (1 / 10000) ^ (i / (units//2 - 1))
Parameters
----------
units
The number of units for positional embedding
learnable
Whether to make the Sinusoidal positional embedding learnable.
If it is turned on, we will also update the frequency of this layer.
See "[ICLR2021] On Position Embeddings in BERT" for more detail.
"""
super().__init__()
def _init_sinusoidal_base(units):
half_units = units // 2
val = np.log(10000) / (half_units - 1)
val = np.exp(np.arange(half_units, dtype=np.float32) * -val)
return val
default_sinusoidal_base = _init_sinusoidal_base(units)
self.freq = nn.Parameter(data=th.tensor(default_sinusoidal_base), requires_grad=learnable)
self._units = units
self._learnable = learnable
|
Use a geometric sequence of timescales.
It is calculated as
[sin(w_0 x), sin(w_1 x), ..., cos(w_0 x), cos(w_1 x), ...]
By default, we initialize w_i to be (1 / 10000) ^ (i / (units//2 - 1))
Parameters
----------
units
The number of units for positional embedding
learnable
Whether to make the Sinusoidal positional embedding learnable.
If it is turned on, we will also update the frequency of this layer.
See "[ICLR2021] On Position Embeddings in BERT" for more detail.
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
def forward(self, positions):
"""
Parameters
----------
positions : th.Tensor
Shape (..., )
Returns
-------
ret :
Shape (..., units)
"""
emb = positions.unsqueeze(-1) * self.freq
sin_emb = th.sin(emb)
cos_emb = th.cos(emb)
if self._units % 2 == 0:
return th.cat([sin_emb, cos_emb], dim=-1)
else:
return th.cat([sin_emb, cos_emb, th.zeros_like(positions).unsqueeze(-1)], dim=-1)
|
Parameters
----------
positions : th.Tensor
Shape (..., )
Returns
-------
ret :
Shape (..., units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/layers.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py
|
Apache-2.0
|
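A plain-torch sketch of the sinusoidal embedding computed by the two snippets above, using the default frequency initialisation (illustrative, even units only):

import numpy as np
import torch as th

units = 6
half = units // 2
# Default frequencies, as in _init_sinusoidal_base above: freq_i = 10000 ** (-i / (half - 1)).
freq = th.tensor(np.exp(np.arange(half, dtype=np.float32) * -(np.log(10000) / (half - 1))))
positions = th.tensor([0.0, 1.0, 2.0])
emb = positions.unsqueeze(-1) * freq                     # (3, half)
pos_emb = th.cat([th.sin(emb), th.cos(emb)], dim=-1)     # (3, units)
print(pos_emb.shape)   # torch.Size([3, 6])
print(pos_emb[0])      # position 0: the sin half is all 0, the cos half is all 1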
def to_torch_dtype(dtype):
"""Convert the dtype to pytorch data type
Parameters
----------
dtype
The input dtype
Returns
-------
ret
Converted dtype
"""
if isinstance(dtype, th.dtype) or dtype is None:
return dtype
dtype = np.dtype(dtype)
if dtype in numpy_to_torch_dtype_dict:
return numpy_to_torch_dtype_dict[dtype]
else:
raise KeyError(f'dtype = {dtype} is not supported for conversion')
|
Convert the dtype to pytorch data type
Parameters
----------
dtype
The input dtype
Returns
-------
ret
Converted dtype
|
to_torch_dtype
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
|
Apache-2.0
|
def to_numpy_dtype(dtype):
"""Convert the dtype to numpy dtype
Parameters
----------
dtype
Input dtype
Returns
-------
ret
The converted dtype
"""
if dtype is None:
return None
if dtype in torch_dtype_to_numpy_dict:
return torch_dtype_to_numpy_dict[dtype]
else:
return np.dtype(dtype)
|
Convert the dtype to numpy dtype
Parameters
----------
dtype
Input dtype
Returns
-------
ret
The converted dtype
|
to_numpy_dtype
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
|
Apache-2.0
|
def share_parameters(source, target):
"""Share parameters recursively from source model to target model.
For example, if you want ``dense1`` to share ``dense0``'s weights, you can do::
dense0 = nn.Linear(20, 20)
dense1 = nn.Linear(20, 20)
share_parameters(dense0, dense1)
which is equivalent to
dense1.weight = dense0.weight
dense1.bias = dense0.bias
Parameters
----------
source : nn.Module
target : nn.Module
"""
def _named_members(module, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules.
Unlike the upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
"""
modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None:
continue
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
source_names = set(n for n, p in _named_members(source, lambda m: m._parameters.items()))
target_names = set(n for n, p in _named_members(target, lambda m: m._parameters.items()))
if not source_names == target_names:
raise ValueError(
'Source and target modules do not have the same set of parameters. '
f'The following parameters are missing from target: "{source_names - target_names}"'
f'The following parameters are missing from source: "{target_names - source_names}"')
for name in source_names:
module_names = name.split('.')
weight_name = module_names.pop()
tmp_source, tmp_target = source, target
for module_name in module_names:
tmp_source = tmp_source._modules[module_name]
tmp_target = tmp_target._modules[module_name]
setattr(tmp_target, weight_name, getattr(tmp_source, weight_name))
|
Share parameters recursively from source model to target model.
For example, if you want ``dense1`` to share ``dense0``'s weights, you can do::
dense0 = nn.Linear(20, 20)
dense1 = nn.Linear(20, 20)
share_parameters(dense0, dense1)
which is equivalent to
dense1.weight = dense0.weight
dense1.bias = dense0.bias
Parameters
----------
source : nn.Module
target : nn.Module
|
share_parameters
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
|
Apache-2.0
|
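A runnable version of the docstring example above, with the torch Linear signature spelled out (hedged: assumes share_parameters is importable as gluonnlp.torch.utils.share_parameters, matching the file path above):

from torch import nn
from gluonnlp.torch.utils import share_parameters

dense0 = nn.Linear(20, 20)
dense1 = nn.Linear(20, 20)
share_parameters(dense0, dense1)
# Both modules now reference the same Parameter objects.
assert dense1.weight is dense0.weight and dense1.bias is dense0.bias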
def _named_members(module, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules.
Unlike the upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
"""
modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None:
continue
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
|
Helper method for yielding various names + members of modules.
Unlike the upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
|
_named_members
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
|
Apache-2.0
|
def move_to(obj, device=None):
"""
Parameters
----------
obj
Nested torch object
device
The target device
Returns
-------
new_obj
The objects that have been moved to device.
"""
if th.is_tensor(obj):
return obj.to(device)
elif isinstance(obj, dict):
res = {}
for k, v in obj.items():
res[k] = move_to(v, device)
return res
elif isinstance(obj, (list, tuple)):
res = []
for v in obj:
res.append(move_to(v, device))
if isinstance(obj, tuple):
res = tuple(res)
return res
else:
raise TypeError("Invalid type for move_to")
|
Parameters
----------
obj
Nested torch object
device
The target device
Returns
-------
new_obj
The objects that have been moved to device.
|
move_to
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py
|
Apache-2.0
|
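A hedged usage sketch for move_to on a nested batch (assumes it is importable as gluonnlp.torch.utils.move_to, matching the file path above); the device falls back to CPU when no GPU is present:

import torch as th
from gluonnlp.torch.utils import move_to

device = th.device('cuda:0') if th.cuda.is_available() else th.device('cpu')
batch = {'tokens': th.zeros(2, 8, dtype=th.long),
         'valid_length': [th.tensor(8), th.tensor(6)]}
batch_on_device = move_to(batch, device)
print(batch_on_device['tokens'].device)
print(batch_on_device['valid_length'][1].device)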
def _pad_arrs_to_max_length(arrs, pad_val, dtype, batch_dim=0, round_to=None):
"""Inner Implementation of the Pad batchify
Parameters
----------
arrs
List of arrays
pad_val
The padding value
dtype
The type of the tensor
batch_dim
The dimension to insert the batch dimension.
This controls how we should construct the mini-batch.
round_to
To round the size of the arrays to the closest multiple of round_to.
This helps ensure the shape of the input tensor.
Returns
-------
ret : th.Tensor
The returned tensor
"""
# First step is to convert the arrays to torch tensor
if not isinstance(arrs[0], th.Tensor):
arrs = [th.tensor(ele, dtype=dtype) for ele in arrs]
dtype = arrs[0].dtype if dtype is None else dtype
max_shape = list(arrs[0].shape)
assert 0 <= batch_dim <= arrs[0].ndim
for pad_axis in range(len(max_shape)):
curr_lengths = [ele.shape[pad_axis] for ele in arrs]
max_size = max(curr_lengths)
if round_to is not None:
max_size = round_to * math.ceil(max_size / round_to)
max_shape[pad_axis] = max_size
ret_shape = tuple(max_shape[:batch_dim]) + (len(arrs), ) + tuple(max_shape[batch_dim:])
# Construct the full output
ret = th.full(size=ret_shape, fill_value=pad_val, dtype=dtype)
for i, arr in enumerate(arrs):
slices = [slice(None) for _ in range(len(max_shape))]
for j in range(len(max_shape)):
if arr.shape[j] < max_shape[j]:
slices[j] = slice(0, arr.shape[j])
slices.insert(batch_dim, i)
ret[tuple(slices)] = arr
return ret
|
Inner Implementation of the Pad batchify
Parameters
----------
arrs
List of arrays
pad_val
The padding value
dtype
The type of the tensor
batch_dim
The dimension to insert the batch dimension.
This controls how we should construct the mini-batch.
round_to
To round the size of the arrays to the closest multiple of round_to.
This helps ensure the shape of the input tensor.
Returns
-------
ret : th.Tensor
The returned tensor
|
_pad_arrs_to_max_length
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/data/batchify.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
|
Apache-2.0
|
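A plain-torch sketch of the padding logic above: two 1-D sequences are padded to a common length, rounded up to a multiple of round_to, and stacked along a new leading batch axis (illustrative only):

import math
import torch as th

arrs = [th.tensor([1, 2, 3, 4, 5]), th.tensor([6, 7])]
pad_val, round_to = 0, 4
max_size = round_to * math.ceil(max(a.shape[0] for a in arrs) / round_to)   # 8
ret = th.full((len(arrs), max_size), pad_val, dtype=arrs[0].dtype)
for i, arr in enumerate(arrs):
    ret[i, :arr.shape[0]] = arr
print(ret)
# tensor([[1, 2, 3, 4, 5, 0, 0, 0],
#         [6, 7, 0, 0, 0, 0, 0, 0]])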
def __call__(self, data):
"""Batchify the input data.
The input can be a list of numpy.ndarray, a list of numbers, or a list of
th.Tensor. The arrays will be padded to the largest dimension at `axis` and then
stacked to form the final output.
Parameters
----------
data : List[np.ndarray] or List[List[dtype]] or List[th.Tensor]
List of samples to pad and stack.
Returns
-------
batch_data: th.Tensor
Data in the minibatch.
If batch_dim = 0:
Shape (N, ...)
Otherwise, N will be inserted at the position given by batch_dim, giving
Shape (..., N, ...)
"""
_arr_cls = th.Tensor
if isinstance(data[0], (_arr_cls, np.ndarray, list)):
padded_arr = _pad_arrs_to_max_length(data, pad_val=self._pad_val, dtype=self._dtype,
batch_dim=self._axis, round_to=self._round_to)
return padded_arr
else:
raise NotImplementedError(
"Pad() does not support multiple items, use Group(Pad(), Pad(), ...) instead")
|
Batchify the input data.
The input can be a list of numpy.ndarray, a list of numbers, or a list of
th.Tensor. The arrays will be padded to the largest dimension at `axis` and then
stacked to form the final output.
Parameters
----------
data : List[np.ndarray] or List[List[dtype]] or List[th.Tensor]
List of samples to pad and stack.
Returns
-------
batch_data: th.Tensor
Data in the minibatch.
If batch_dim = 0:
Shape (N, ...)
Otherwise, N will be inserted at the position given by batch_dim, giving
Shape (..., N, ...)
|
__call__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/data/batchify.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
|
Apache-2.0
|
def _stack_arrs(arrs, batch_dim, dtype):
"""
Parameters
----------
arrs
batch_dim
The batch dimension
dtype
torch dtype
Returns
-------
stacked_arr
The resulting stacked array
"""
if isinstance(arrs[0], np.ndarray):
stacked_arr = np.stack(arrs, axis=batch_dim)
return th.as_tensor(stacked_arr, dtype=dtype)
elif isinstance(arrs[0], th.Tensor):
ret = th.stack(arrs, dim=batch_dim)
if dtype is None:
dtype = ret.dtype
if ret.dtype != dtype:
return ret.type(dtype)
else:
return ret
else:
stacked_arr = np.stack([np.array(arr) for arr in arrs], axis=batch_dim)
return th.as_tensor(stacked_arr, dtype=dtype)
|
Parameters
----------
arrs
batch_dim
The batch dimension
dtype
torch dtype
Returns
-------
stacked_arr
The resulting stacked array
|
_stack_arrs
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/data/batchify.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
|
Apache-2.0
|
def __call__(self, data: t_List[t_Dict]) -> t_Dict:
"""
Parameters
----------
data
The samples to batchify. Each sample should be a dictionary
Returns
-------
ret
The resulting dictionary that stores the merged samples.
"""
ret = dict()
for k, ele_fn in self._fn_dict.items():
ret[k] = ele_fn([ele[k] for ele in data])
return ret
|
Parameters
----------
data
The samples to batchify. Each sample should be a dictionary
Returns
-------
ret
The resulting dictionary that stores the merged samples.
|
__call__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/data/batchify.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
|
Apache-2.0
|
def __call__(self, data: t_List[t_NamedTuple]) -> t_NamedTuple:
"""Batchify the input data.
Parameters
----------
data
The samples to batchify. Each sample should be a namedtuple.
Returns
-------
ret
A namedtuple of length N. Contains the batchified result of each attribute in the input.
"""
if not isinstance(data[0], self._container):
raise ValueError('The samples should have the same type as the stored namedtuple.'
' data[0]={}, container={}'.format(data[0], self._container))
ret = []
for i, ele_fn in enumerate(self._fn_l):
ret.append(ele_fn([ele[i] for ele in data]))
return self._container(*ret)
|
Batchify the input data.
Parameters
----------
data
The samples to batchify. Each sample should be a namedtuple.
Returns
-------
ret
A namedtuple of length N. Contains the batchified result of each attribute in the input.
|
__call__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/data/batchify.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py
|
Apache-2.0
|
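A minimal sketch of the per-field dispatch in the two __call__ methods above: each attribute of the samples is gathered and handed to its own batchify callable (here plain list, standing in for something like a pad or stack function):

from collections import namedtuple

Sample = namedtuple('Sample', ['tokens', 'valid_length'])
samples = [Sample([1, 2, 3], 3), Sample([4, 5], 2)]
fn_l = [list, list]   # one batchify function per field
batched = Sample(*[fn([getattr(s, field) for s in samples])
                   for fn, field in zip(fn_l, Sample._fields)])
print(batched)   # Sample(tokens=[[1, 2, 3], [4, 5]], valid_length=[3, 2])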
def forward(self, data, valid_length):
"""
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length, attn_type='full', layout=self.layout)
out = data
all_encodings_outputs = []
additional_outputs = []
for layer_idx in range(self._num_layers):
layer = self.all_layers[layer_idx]
out, attention_weights = layer(out, attn_mask)
# out : [batch_size, seq_len, units] or [seq_len, batch_size, units]
# attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = sequence_mask(out, valid_len=valid_length, axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = sequence_mask(out, valid_len=valid_length, axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
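Usage note (not part of the source file): a minimal re-implementation sketch of a 'full' self-attention mask for layout='NT', assuming the same (batch_size, seq_length, seq_length) convention as the gen_self_attn_mask call above; the library's own helper may differ in details such as dtype.

import torch as th

def full_self_attn_mask_nt(data, valid_length):
    # data: (batch_size, seq_length, C); valid_length: (batch_size,)
    seq_length = data.shape[1]
    steps = th.arange(seq_length, device=data.device)
    valid = steps[None, :] < valid_length[:, None]        # (batch_size, seq_length)
    # Position (b, i, j) may attend only if both i and j are within valid_length[b].
    return valid[:, :, None] & valid[:, None, :]          # (batch_size, seq_length, seq_length)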
def forward(self, inputs, token_types, valid_length):
# pylint: disable=arguments-differ
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
                Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output :
This is optional. Shape (batch_size, units)
"""
if token_types is None:
token_types = th.zeros_like(inputs)
initial_embedding = self.get_initial_embedding(inputs, token_types)
prev_out = initial_embedding
outputs = []
if self._compute_layout != self._layout:
# Swap the axes if the compute_layout and layout mismatch
contextual_embeddings, additional_outputs = self.encoder(th.transpose(prev_out, 0, 1),
valid_length)
contextual_embeddings = th.transpose(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(prev_out, valid_length)
outputs.append(contextual_embeddings)
if self.use_pooler:
pooled_out = self.apply_pooling(contextual_embeddings)
outputs.append(pooled_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0]
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
        Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output :
This is optional. Shape (batch_size, units)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
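Usage note (not part of the source file): a hypothetical call sketch, assuming `model` is an already constructed BertModel with layout='NT' and use_pooler=True; the vocabulary size and shapes below are toy values.

import torch as th

inputs = th.randint(0, 30522, (2, 8))       # (batch_size, seq_length) token ids
token_types = th.zeros_like(inputs)         # single-sentence inputs
valid_length = th.tensor([8, 5])            # number of non-padding tokens per sample
contextual_embeddings, pooled_out = model(inputs, token_types, valid_length)
# contextual_embeddings: (2, 8, units); pooled_out: (2, units)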
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If None, it will be initialized as all zero
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
embedding = self.word_embed(inputs)
if token_types is None:
token_types = th.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding = self.token_pos_embed(
th.arange(end=inputs.shape[time_axis], device=inputs.device))
positional_embedding = th.unsqueeze(positional_embedding, dim=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding
|
Get the initial token embeddings, taking the token type and positional embeddings into account.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If None, it will be initialized as all zero
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
|
get_initial_embedding
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a bert model.
Get the first token of the whole sequence which is [CLS]
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
return:
Shape (batch_size, units)
"""
if self.layout == 'NT':
outputs = sequence[:, 0, :]
else:
outputs = sequence[0, :, :]
return th.tanh(self.pooler(outputs))
|
Apply pooling to the contextual embeddings.
This is used for pre-training or fine-tuning a bert model.
It takes the representation of the first token of the whole sequence, which is [CLS].
Parameters
----------
sequence
    - layout = 'NT'
        Shape (batch_size, sequence_length, units)
    - layout = 'TN'
        Shape (sequence_length, batch_size, units)
Returns
-------
ret
    Shape (batch_size, units)
|
apply_pooling
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
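Usage note (not part of the source file): a minimal sketch of the [CLS] pooling above in plain torch, with toy shapes; the linear projection below stands in for `self.pooler`.

import torch as th

pooler = th.nn.Linear(16, 16)               # stand-in for self.pooler
sequence = th.randn(2, 8, 16)               # (batch_size, sequence_length, units), layout='NT'
cls_feature = sequence[:, 0, :]             # representation of the first ([CLS]) token
pooled = th.tanh(pooler(cls_feature))       # (batch_size, units)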
def from_cfg(cls, cfg, use_pooler=True) -> 'BertModel':
"""
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
Returns
-------
ret
The constructed BertModel
"""
cfg = BertModel.get_cfg().clone_merge(cfg)
assert cfg.VERSION == 1, 'Wrong version!'
return cls(vocab_size=cfg.MODEL.vocab_size, units=cfg.MODEL.units,
hidden_size=cfg.MODEL.hidden_size, num_layers=cfg.MODEL.num_layers,
num_heads=cfg.MODEL.num_heads, max_length=cfg.MODEL.max_length,
hidden_dropout_prob=cfg.MODEL.hidden_dropout_prob,
attention_dropout_prob=cfg.MODEL.attention_dropout_prob,
num_token_types=cfg.MODEL.num_token_types,
pos_embed_type=cfg.MODEL.pos_embed_type, activation=cfg.MODEL.activation,
layer_norm_eps=cfg.MODEL.layer_norm_eps, use_pooler=use_pooler,
layout=cfg.MODEL.layout, compute_layout=cfg.MODEL.compute_layout)
|
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
Returns
-------
ret
The constructed BertModel
|
from_cfg
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
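Usage note (not part of the source file): a sketch of constructing the model from a configuration, assuming `BertModel.get_cfg()` returns the default CfgNode referenced in the code above; the override is hypothetical.

cfg = BertModel.get_cfg()                   # default configuration (CfgNode)
cfg.defrost()
cfg.MODEL.num_layers = 6                    # hypothetical override for a smaller model
cfg.freeze()
model = BertModel.from_cfg(cfg, use_pooler=True)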
def __init__(self, backbone_cfg):
"""
Parameters
----------
backbone_cfg
The cfg of the backbone model
"""
super().__init__()
self.backbone_model = BertModel.from_cfg(backbone_cfg)
# Construct nsp_classifier for next sentence prediction
self.nsp_classifier = th.nn.Linear(out_features=2, in_features=self.backbone_model.units)
self.mlm_decoder = th.nn.Sequential(
th.nn.Linear(out_features=self.backbone_model.units,
in_features=self.backbone_model.units),
get_activation(self.backbone_model.activation),
th.nn.LayerNorm(self.backbone_model.units, eps=self.backbone_model.layer_norm_eps),
th.nn.Linear(out_features=self.backbone_model.vocab_size,
in_features=self.backbone_model.units))
# TODO such weight sharing not supported in torchscript
self.mlm_decoder[-1].weight = self.backbone_model.word_embed.weight
|
Parameters
----------
backbone_cfg
The cfg of the backbone model
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
nsp_score = self.nsp_classifier(pooled_out)
if self.layout == 'NT':
mlm_features = contextual_embeddings[
th.arange(contextual_embeddings.shape[0]).unsqueeze(1), masked_positions]
else:
mlm_features = th.transpose(contextual_embeddings, 0,
1)[th.arange(contextual_embeddings.shape[1]).unsqueeze(1),
masked_positions]
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, nsp_score, mlm_scores
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
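Usage note (not part of the source file): a minimal sketch of the per-row masked-position gather used in the forward above for layout='NT', with toy tensors.

import torch as th

contextual = th.randn(2, 8, 16)                     # (batch_size, seq_length, units)
masked_positions = th.tensor([[1, 3], [2, 5]])      # (batch_size, num_masked_positions)
rows = th.arange(contextual.shape[0]).unsqueeze(1)  # (batch_size, 1), broadcasts over columns
mlm_features = contextual[rows, masked_positions]   # (batch_size, num_masked_positions, units)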
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence with respect to flattened batch
            Shape (N,) for N masked positions across the whole batch.
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (N, vocab_size)
"""
assert len(inputs) % 2 == 0, 'Model expects QuickThought paired inputs'
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.layout == 'NT':
mlm_features = contextual_embeddings.flatten(0, 1)[masked_positions]
else:
mlm_features = th.transpose(contextual_embeddings, 0, 1).flatten(0, 1)[masked_positions]
mlm_scores = self.mlm_decoder(mlm_features)
qt_embeddings = self.quickthought(pooled_out)
qt_similarity = self._cosine_similarity(qt_embeddings[:len(inputs) // 2],
qt_embeddings[len(inputs) // 2:])
return contextual_embeddings, pooled_out, mlm_scores, qt_similarity
|
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence with respect to flattened batch
    Shape (N,) for N masked positions across the whole batch.
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (N, vocab_size)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/bert.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py
|
Apache-2.0
|
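Usage note (not part of the source file): unlike the previous model, the masked positions here index the flattened (batch_size * seq_length) axis; a minimal sketch with toy tensors.

import torch as th

contextual = th.randn(2, 8, 16)                 # (batch_size, seq_length, units), layout='NT'
flat_positions = th.tensor([1, 3, 10, 13])      # positions w.r.t. the flattened batch, N = 4
mlm_features = contextual.flatten(0, 1)[flat_positions]   # (N, units)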
def __init__(self, units: int = 512, hidden_size: int = 2048, num_heads: int = 8,
attention_dropout_prob: float = 0.1, hidden_dropout_prob: float = 0.1,
activation_dropout_prob: float = 0.0, layer_norm_eps: float = 1e-12,
pre_norm: bool = False, use_qkv_bias: bool = True, activation: str = 'relu',
layout='NT'):
"""
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
data -> norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
activation
The activation
layout
The layout
"""
super().__init__()
self._units = units
self._hidden_size = hidden_size
self._num_heads = num_heads
self._attention_dropout_prob = attention_dropout_prob
self._hidden_dropout_prob = hidden_dropout_prob
self._activation_dropout_prob = activation_dropout_prob
self._pre_norm = pre_norm
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
        assert self._units % self._num_heads == 0, 'units must be divisible by the number of heads'
self.dropout_layer = nn.Dropout(hidden_dropout_prob)
self.attn_qkv = nn.Linear(out_features=3 * units, in_features=units, bias=use_qkv_bias)
self.attention_proj = nn.Linear(out_features=units, in_features=units, bias=True)
attention_layout = 'NTK' if self._layout == 'NT' else 'TNK'
self.attention_cell = \
MultiHeadAttentionCell(
query_units=self._units,
num_heads=self._num_heads,
attention_dropout=self._attention_dropout_prob,
scaled=True,
layout=attention_layout
)
self.layer_norm = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ffn = PositionwiseFFN(units=units, hidden_size=hidden_size,
dropout=hidden_dropout_prob,
activation_dropout=activation_dropout_prob,
layer_norm_eps=layer_norm_eps, activation=activation,
pre_norm=pre_norm)
|
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
data -> norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
activation
The activation
layout
The layout
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
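Usage note (not part of the source file): a minimal sketch of the two residual arrangements that `pre_norm` selects, with `attn` and `norm` standing in for the attention sub-block and the layer norm.

def residual_block(data, attn, norm, pre_norm):
    residual = data
    if pre_norm:                 # pre-norm: data -> norm(data) -> attn -> +residual
        data = norm(data)
    out = attn(data) + residual
    if not pre_norm:             # post-norm: data -> attn -> +residual -> norm
        out = norm(out)
    return out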
def forward(self, data, attn_mask):
"""
Parameters
----------
data :
If layout == 'NT'
Shape (batch_size, seq_length, C_in)
Else
Shape (seq_length, batch_size, C_in)
attn_mask :
Shape (batch_size, seq_length, seq_length)
Returns
-------
out :
If layout == 'NT'
Shape (batch_size, seq_length, C_out)
Else
Shape (seq_length, batch_size, C_out)
attn_weight :
Shape (batch_size, seq_length, seq_length)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
query, key, value = th.split(self.attn_qkv(data), self._units, dim=-1)
query = th.reshape(query, query.shape[:2] + (self._num_heads, -1))
key = th.reshape(key, key.shape[:2] + (self._num_heads, -1))
value = th.reshape(value, value.shape[:2] + (self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, attn_mask)
out = self.attention_proj(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
out = self.ffn(out)
return out, attn_weight
|
Parameters
----------
data :
If layout == 'NT'
Shape (batch_size, seq_length, C_in)
Else
Shape (seq_length, batch_size, C_in)
attn_mask :
Shape (batch_size, seq_length, seq_length)
Returns
-------
out :
If layout == 'NT'
Shape (batch_size, seq_length, C_out)
Else
Shape (seq_length, batch_size, C_out)
attn_weight :
Shape (batch_size, seq_length, seq_length)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def __init__(self, units: int = 512, mem_units: Optional[int] = None, hidden_size: int = 2048,
num_heads: int = 8, activation_dropout: float = 0.0, dropout: float = 0.1,
attention_dropout: float = 0.1, layer_norm_eps: float = 1E-5,
activation: str = 'relu', gated_proj: bool = False, pre_norm: bool = False,
use_qkv_bias: bool = True, layout='NT'):
"""
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
gated_proj
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
layout
Layout of the input
"""
super().__init__()
self._units = units
if mem_units is None:
mem_units = units
self._mem_units = mem_units
self._pre_norm = pre_norm
self._num_heads = num_heads
self._attention_dropout = attention_dropout
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
attention_layout = 'NTK' if layout == 'NT' else 'TNK'
self.dropout_layer = nn.Dropout(dropout)
if units % num_heads:
            raise ValueError('In Transformer, units should be divisible by the number of '
'heads. Received units={}, num_heads={}'.format(units, num_heads))
self.attn_in_qkv = nn.Linear(out_features=3 * units, in_features=units, bias=use_qkv_bias)
self.self_attention = MultiHeadAttentionCell(query_units=units, num_heads=num_heads,
attention_dropout=self._attention_dropout,
layout=attention_layout)
self.proj_in = nn.Linear(out_features=units, in_features=units, bias=True)
self.attn_inter_q = nn.Linear(out_features=units, in_features=units, bias=use_qkv_bias)
self.attn_inter_k = nn.Linear(out_features=units, in_features=mem_units, bias=use_qkv_bias)
self.attn_inter_v = nn.Linear(out_features=units, in_features=mem_units, bias=use_qkv_bias)
self.inter_attention = MultiHeadAttentionCell(query_units=units, num_heads=num_heads,
attention_dropout=self._attention_dropout,
layout=attention_layout)
self.proj_inter = nn.Linear(in_features=units, out_features=units, bias=True)
self.ln_in = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ln_inter = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ffn = PositionwiseFFN(units=units, hidden_size=hidden_size, dropout=dropout,
activation_dropout=activation_dropout,
layer_norm_eps=layer_norm_eps, activation=activation,
gated_proj=gated_proj, pre_norm=pre_norm)
self.init_weights()
|
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
gated_proj
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
layout
Layout of the input
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def forward(self, data, mem, self_causal_mask, mem_attn_mask):
"""
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem :
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask :
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
            self_causal_mask[i, j, :] indicates which positions token `j` may attend to (1 = attend, 0 = masked).
To understand the self-causal attention mask, we can look at the following example:
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out :
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Get the causal self-attention value
residual = data
if self._pre_norm:
data = self.ln_in(data)
self_query, self_key, self_value = th.split(self.attn_in_qkv(data), self._units, dim=-1)
out, [_, self_attn_weight] = self.self_attention(
self_query.reshape((self_query.shape[0], self_query.shape[1], self._num_heads, -1)),
self_key.reshape((self_key.shape[0], self_key.shape[1], self._num_heads, -1)),
self_value.reshape((self_value.shape[0], self_value.shape[1], self._num_heads, -1)),
self_causal_mask)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
residual = data
if self._pre_norm:
data = self.ln_inter(data)
out, [_, context_attn_weight] = self.inter_attention(
th.reshape(self.attn_inter_q(data),
(data.shape[0], data.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_k(mem), (mem.shape[0], mem.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_v(mem), (mem.shape[0], mem.shape[1], self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
return out
|
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem :
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask :
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
    self_causal_mask[i, j, :] indicates which positions token `j` may attend to (1 = attend, 0 = masked).
To understand the self-causal attention mask, we can look at the following example:
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out :
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
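Usage note (not part of the source file): a minimal sketch of building the causal self-attention mask described above for layout='NT'; the library's gen_self_attn_mask helper may differ in dtype and edge cases.

import torch as th

def causal_self_attn_mask_nt(data, valid_length):
    # data: (batch_size, seq_length, C); valid_length: (batch_size,)
    seq_length = data.shape[1]
    steps = th.arange(seq_length, device=data.device)
    causal = steps[None, :, None] >= steps[None, None, :]        # query i may see keys j <= i
    valid = steps[None, None, :] < valid_length[:, None, None]   # drop padded key positions
    return causal & valid                                        # (batch_size, seq_length, seq_length)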
def init_states(self, batch_size, device=None, dtype='float32'):
"""Initialize the states required for incremental decoding
Parameters
----------
batch_size
device
dtype
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
dtype = to_torch_dtype(dtype)
if self.layout == 'NT':
init_key = th.zeros(
size=(batch_size, 0, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
init_value = th.zeros(
size=(batch_size, 0, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
else:
init_key = th.zeros(
size=(0, batch_size, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
init_value = th.zeros(
size=(0, batch_size, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
return init_key, init_value
|
Initialize the states required for incremental decoding
Parameters
----------
batch_size
device
dtype
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT':
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
"""
batch_size = data.shape[0]
if self.layout == 'NT':
time_axis = 1
else:
time_axis = 0
data = data.unsqueeze(time_axis)
residual = data
if self._pre_norm:
data = self.ln_in(data)
# Shape (B, prev_L, #Head, C_K), (B, prev_L, #Head, C_V)
# or (prev_L, B, #Head, C_K), (prev_L, B, #Head, C_V)
prev_key, prev_value = states
if mem_attn_mask is None:
mem_attn_mask = gen_mem_attn_mask(mem, mem_valid_length, data, None, layout=self.layout)
# 1. Get the causal self-attention value, we need to attend to both the current data
# and the previous stored key/values
# Shape (B, 1, 3 * num_heads * C_key)
# or (1, B, 3 * num_heads * C_key)
step_qkv = self.attn_in_qkv(data)
step_query, step_key, step_value = th.split(step_qkv, self._units, dim=-1)
step_query = th.reshape(
step_query, shape=(step_query.shape[0], step_query.shape[1], self._num_heads, -1))
step_key = th.reshape(step_key,
shape=(step_key.shape[0], step_key.shape[1], self._num_heads, -1))
step_value = th.reshape(
step_value, shape=(step_value.shape[0], step_value.shape[1], self._num_heads, -1))
new_key = th.cat([prev_key, step_key], dim=time_axis)
new_value = th.cat([prev_value, step_value], dim=time_axis)
out, [_, attn_weight] = self.self_attention(step_query, new_key, new_value, None)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
residual = data
if self._pre_norm:
data = self.ln_inter(data)
out, _ = self.inter_attention(
th.reshape(self.attn_inter_q(data),
shape=(data.shape[0], data.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_k(mem),
shape=(mem.shape[0], mem.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_v(mem),
shape=(mem.shape[0], mem.shape[1], self._num_heads, -1)), mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
out = th.reshape(out, shape=(batch_size, -1))
return out, (new_key, new_value)
|
Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT':
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def forward(self, data, valid_length, mem_data, mem_valid_length):
"""Run forward
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
self_causal_mask = gen_self_attn_mask(data, valid_length, attn_type='causal',
layout=self._layout)
mem_attn_mask = gen_mem_attn_mask(mem_data, mem_valid_length, data, valid_length,
layout=self._layout)
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out = layer(out, mem_data, self_causal_mask, mem_attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out
|
Run forward
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
|
forward
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def init_states(self, batch_size, device=None, dtype='float32'):
"""Initialize the states required for incremental decoding
Parameters
----------
batch_size
The batch size
device
The device
dtype
The data type of the states
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
states.append(layer.init_states(batch_size=batch_size, device=device, dtype=dtype))
return states
|
Initialize the states required for incremental decoding
Parameters
----------
batch_size
The batch size
device
The device
dtype
The data type of the states
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
|
init_states
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
def incremental_decode(self, data, states, mem, mem_valid_length):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
                    Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
time_axis = 0 if self.layout == 'TN' else 1
mem_length = mem.shape[time_axis]
# Generate the mem_attn_mask
time_steps = th.arange(mem_length, device=data.device) # (mem_length,)
mem_attn_mask = time_steps.reshape((1, 1, -1)) < mem_valid_length.reshape((-1, 1, 1))
new_states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, new_state = layer.incremental_decode(out, states[i], mem, mem_valid_length,
mem_attn_mask)
new_states.append(new_state)
if self._pre_norm:
out = self.ln_final(out)
return out, new_states
|
Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
            Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
|
incremental_decode
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/models/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py
|
Apache-2.0
|
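Usage note (not part of the source file): a hypothetical greedy-decoding loop built on the init_states/incremental_decode pair above; `decoder`, `embed`, `output_layer`, `mem`, `mem_valid_length`, `bos_id`, and `max_length` are assumed to exist and are not defined in the source.

import torch as th

batch_size = mem.shape[0]                    # assumes layout='NT' memory: (batch_size, mem_length, C_mem)
states = decoder.init_states(batch_size=batch_size, device=mem.device)
step_token = th.full((batch_size,), bos_id, dtype=th.long, device=mem.device)
generated = []
for _ in range(max_length):
    step_data = embed(step_token)            # (batch_size, C_in)
    step_out, states = decoder.incremental_decode(step_data, states, mem, mem_valid_length)
    step_token = output_layer(step_out).argmax(dim=-1)   # pick the most likely next token
    generated.append(step_token)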
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume same step across group now to simplify things
            # per-parameter step could easily be supported by making it a tensor, or passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, q_16, p_16, m_16, v_16 = [], [], [], [], []
g_32, q_32, p_32, m_32, v_32 = [], [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedLANS does not support sparse gradients, please consider SparseAdam instead'
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Buffer for scaled grad
state['scaled_grad'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
q_16.append(state['scaled_grad'])
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
q_32.append(state['scaled_grad'])
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedLANS only supports fp16 and fp32.')
if (len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_lans, self._dummy_overflow_buf,
[g_16, q_16, p_16, m_16, v_16], group['lr'], beta1, beta2,
group['eps'], group['step'], bias_correction,
group['weight_decay'], grad_averaging, self.adam_w_mode,
group['normalize_grad'])
if (len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_lans, self._dummy_overflow_buf,
[g_32, q_32, p_32, m_32, v_32], group['lr'], beta1, beta2,
group['eps'], group['step'], bias_correction,
group['weight_decay'], grad_averaging, self.adam_w_mode,
group['normalize_grad'])
return loss
|
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
|
step
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/optimizers/fused_lans.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/fused_lans.py
|
Apache-2.0
|
def get_warmup_linear_const_decay_poly_schedule(optimizer, total_steps, warmup_ratio=0.002,
const_ratio=0., degree=1.0, last_epoch=-1):
"""Create a schedule with a learning rate that decreases linearly from the
initial lr set in the optimizer to 0, after a warmup period during which it
increases linearly from 0 to the initial lr set in the optimizer and a
constant period.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
total_steps (:obj:`int`):
The total number of training steps.
        warmup_ratio (:obj:`float`):
            The fraction of total training steps used for the linear warmup phase.
        const_ratio (:obj:`float`):
            The fraction of total training steps during which the learning rate is held constant
            after warmup.
        degree (:obj:`float`):
            The degree of the polynomial decay after the constant phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(global_step: int):
x = global_step / total_steps
if warmup_ratio == 0.0:
return 1.0
elif x < warmup_ratio:
return x / warmup_ratio
elif x < warmup_ratio + const_ratio:
return 1.0
return ((1.0 - x) / (1.0 - warmup_ratio - const_ratio))**degree
return LambdaLR(optimizer, lr_lambda, last_epoch)
|
Create a schedule with a learning rate that decreases linearly from the
initial lr set in the optimizer to 0, after a warmup period during which it
increases linearly from 0 to the initial lr set in the optimizer and a
constant period.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
total_steps (:obj:`int`):
The total number of training steps.
warmup_ratio (:obj:`float`):
    The fraction of total training steps used for the linear warmup phase.
const_ratio (:obj:`float`):
    The fraction of total training steps during which the learning rate is held constant
    after warmup.
degree (:obj:`float`):
    The degree of the polynomial decay after the constant phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
|
get_warmup_linear_const_decay_poly_schedule
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/torch/optimizers/schedules.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/schedules.py
|
Apache-2.0
|
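Worked example (not part of the source file): the multiplier rises linearly during warmup, stays at 1.0 for the constant phase, then decays polynomially to 0. The values below are assumptions for illustration: total_steps=1000, warmup_ratio=0.1, const_ratio=0.2, degree=1.0.

def lr_multiplier(step, total_steps=1000, warmup_ratio=0.1, const_ratio=0.2, degree=1.0):
    x = step / total_steps
    if x < warmup_ratio:
        return x / warmup_ratio
    if x < warmup_ratio + const_ratio:
        return 1.0
    return ((1.0 - x) / (1.0 - warmup_ratio - const_ratio)) ** degree

print(lr_multiplier(50))    # 0.5  -> halfway through warmup
print(lr_multiplier(200))   # 1.0  -> inside the constant phase
print(lr_multiplier(650))   # 0.5  -> halfway through the linear decay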
def clone_merge(self, cfg_filename_or_other_cfg):
"""Create a new cfg by cloning and merging with the given cfg
Parameters
----------
cfg_filename_or_other_cfg
Returns
-------
"""
ret = self.clone()
if isinstance(cfg_filename_or_other_cfg, str):
ret.merge_from_file(cfg_filename_or_other_cfg)
return ret
elif isinstance(cfg_filename_or_other_cfg, CfgNode):
ret.merge_from_other_cfg(cfg_filename_or_other_cfg)
return ret
elif cfg_filename_or_other_cfg is None:
return ret
else:
raise TypeError('Type of config path is not supported!')
|
Create a new cfg by cloning and merging with the given cfg
Parameters
----------
cfg_filename_or_other_cfg
Returns
-------
|
clone_merge
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/config.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/config.py
|
Apache-2.0
|
def glob(url, separator=','):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards.
Input may also include multiple patterns, separated by separator.
Parameters
----------
url : str
The name of the files
separator : str, default is ','
The separator in url to allow multiple patterns in the input
"""
patterns = [url] if separator is None else url.split(separator)
result = []
for pattern in patterns:
result.extend(_glob.glob(os.path.expanduser(pattern.strip())))
return result
|
Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards.
Input may also include multiple patterns, separated by separator.
Parameters
----------
url : str
The name of the files
separator : str, default is ','
The separator in url to allow multiple patterns in the input
|
glob
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
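Usage note (not part of the source file): the separator allows several shell patterns in one string; the paths below are placeholders.

# Expand two comma-separated patterns into a single list of matching paths.
files = glob('~/data/train-*.txt,~/data/extra/*.json')
for path in files:
    print(path)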
def file_line_number(path: str) -> int:
"""
Parameters
----------
path
The path to calculate the number of lines in a file.
Returns
-------
ret
The number of lines
"""
ret = 0
with open(path, 'rb') as f:
for _ in f:
ret += 1
return ret
|
Parameters
----------
path
The path to calculate the number of lines in a file.
Returns
-------
ret
The number of lines
|
file_line_number
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def md5sum(filename):
"""Calculate the md5sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The md5sum
"""
with open(filename, mode='rb') as f:
d = hashlib.md5()
for buf in iter(functools.partial(f.read, 1024*100), b''):
d.update(buf)
return d.hexdigest()
|
Calculate the md5sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The md5sum
|
md5sum
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def sha1sum(filename):
"""Calculate the sha1sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The sha1sum
"""
with open(filename, mode='rb') as f:
d = hashlib.sha1()
for buf in iter(functools.partial(f.read, 1024*100), b''):
d.update(buf)
return d.hexdigest()
|
Calculate the sha1sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The sha1sum
|
sha1sum
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def logging_config(folder: Optional[str] = None,
name: Optional[str] = None,
logger: logging.Logger = logging.root,
level: int = logging.INFO,
console_level: int = logging.INFO,
console: bool = True,
overwrite_handler: bool = False) -> str:
"""Config the logging module. It will set the logger to save to the specified file path.
Parameters
----------
folder
The folder to save the log
    name
        Name of the saved log file
logger
The logger
level
Logging level
console_level
Logging level of the console log
console
Whether to also log to console
overwrite_handler
Whether to overwrite the existing handlers in the logger
Returns
-------
folder
The folder to save the log file.
"""
if name is None:
name = inspect.stack()[-1][1].split('.')[0]
if folder is None:
folder = os.path.join(os.getcwd(), name)
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
need_file_handler = True
need_console_handler = True
# Check all loggers.
if overwrite_handler:
logger.handlers = []
else:
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
need_console_handler = False
logpath = os.path.join(folder, name + ".log")
print("All Logs will be saved to {}".format(logpath))
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if need_file_handler:
logfile = logging.FileHandler(logpath)
logfile.setLevel(level)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
if console and need_console_handler:
        # Initialize the console logging
logconsole = logging.StreamHandler()
logconsole.setLevel(console_level)
logconsole.setFormatter(formatter)
logger.addHandler(logconsole)
return folder
|
Configure the logging module. It will set the logger to save to the specified file path.
Parameters
----------
folder
The folder to save the log
name
    Name of the saved log file
logger
The logger
level
Logging level
console_level
Logging level of the console log
console
Whether to also log to console
overwrite_handler
Whether to overwrite the existing handlers in the logger
Returns
-------
folder
The folder to save the log file.
|
logging_config
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def logerror(logger: logging.Logger = logging.root):
"""A decorator that wraps the passed in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
"""
def log_wrapper(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
# log the exception
logger.exception(
f'{function.__name__}(args={args}, kwargs={kwargs}) failed:\n{e}.')
raise e
return wrapper
return log_wrapper
|
A decorator that wraps the passed-in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
|
logerror
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
|
Collect data into fixed-length chunks or blocks
|
grouper
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def repeat(iterable, count=None):
"""Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable for "count" times. If it is None, it will be an infinite iterator.
Returns
-------
new_iterable
A new iterable in which the basic iterator has been repeated for multiple rounds.
"""
if count is None:
while True:
for sample in iterable:
yield sample
else:
for i in range(count):
for sample in iterable:
yield sample
|
Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable for "count" times. If it is None, it will be an infinite iterator.
Returns
-------
new_iterable
A new iterable in which the basic iterator has been repeated for multiple rounds.
|
repeat
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def load_checksum_stats(path: str) -> dict:
"""
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
"""
file_stats = dict()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
name, hex_hash, file_size = line.strip().split()
file_stats[name] = hex_hash
if name[8:27] == 'gluonnlp-numpy-data':
new_name = name.replace('https://gluonnlp-numpy-data.s3-accelerate.amazonaws.com', 's3://gluonnlp-numpy-data')
file_stats[new_name] = hex_hash
return file_stats
|
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
|
load_checksum_stats
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def download_file_from_google_drive(file_id, dest_path, overwrite=False, showsize=False):
"""Downloads a shared file from google drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it from the sharable link.
dest_path: str
the destination where to save the downloaded file.
Must be a path (for example: './downloaded_file.txt')
overwrite: bool
optional, if True forces re-download and overwrite.
showsize: bool
optional, if True print the current download size.
"""
destination_directory = os.path.dirname(dest_path)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
if not os.path.exists(dest_path) or overwrite:
session = requests.Session()
print('Downloading {} into {}... '.format(file_id, dest_path), end='')
sys.stdout.flush()
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL,
params={'id': file_id}, stream=True)
token = GoogleDriveDownloader._get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL,
params=params, stream=True)
if showsize:
print() # Skip to the next line
current_download_size = [0]
GoogleDriveDownloader._save_response_content(response, dest_path, showsize,
current_download_size)
print('Done.')
|
Downloads a shared file from google drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it from the sharable link.
dest_path: str
the destination where to save the downloaded file.
Must be a path (for example: './downloaded_file.txt')
overwrite: bool
optional, if True forces re-download and overwrite.
showsize: bool
optional, if True print the current download size.
|
download_file_from_google_drive
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def download(url: str,
path: Optional[str] = None,
overwrite: Optional[bool] = False,
sha1_hash: Optional[str] = None,
retries: Optional[int] = 5,
verify_ssl: Optional[bool] = True,
anonymous_credential: Optional[bool] = True) -> str:
"""Download a given URL
Parameters
----------
url
URL to download
path
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite
Whether to overwrite destination file if already exists.
sha1_hash
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries
The number of times to attempt the download in case of failure or non 200 return codes
verify_ssl
Verify SSL certificates.
anonymous_credential
Whether to force to use anonymous credential if the path is from S3.
Returns
-------
fname
The file path of the downloaded file.
"""
is_s3 = url.startswith(S3_PREFIX)
if is_s3:
boto3, botocore = try_import_boto3()
s3 = boto3.resource('s3')
if boto3.session.Session().get_credentials() is None or anonymous_credential:
from botocore.handlers import disable_signing
s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
components = url[len(S3_PREFIX):].split('/')
if len(components) < 2:
raise ValueError('Invalid S3 url. Received url={}'.format(url))
s3_bucket_name = components[0]
s3_key = '/'.join(components[1:])
if path is None:
fname = url.split('/')[-1]
# Empty filenames are invalid
assert fname, 'Can\'t construct file-name from this URL. ' \
'Please set the `path` option manually.'
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
retries)
if not verify_ssl:
warnings.warn(
'Unverified HTTPS request is being made (verify_ssl=False). '
'Adding certificate verification is strongly advised.')
if overwrite or not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)
while retries + 1 > 0:
# Disable pyling too broad Exception
# pylint: disable=W0703
try:
print('Downloading {} from {}...'.format(fname, url))
if is_s3:
response = s3.meta.client.head_object(Bucket=s3_bucket_name,
Key=s3_key)
total_size = int(response.get('ContentLength', 0))
random_uuid = str(uuid.uuid4())
tmp_path = '{}.{}'.format(fname, random_uuid)
if tqdm is not None:
def hook(t_obj):
def inner(bytes_amount):
t_obj.update(bytes_amount)
return inner
with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True) as t:
s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path,
Callback=hook(t))
else:
s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path)
else:
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError('Failed downloading url {}'.format(url))
# create uuid for temporary files
random_uuid = str(uuid.uuid4())
total_size = int(r.headers.get('content-length', 0))
chunk_size = 1024
if tqdm is not None:
t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True)
with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
if tqdm is not None:
t.update(len(chunk))
f.write(chunk)
if tqdm is not None:
t.close()
# if the target file exists(created by other processes)
# and have the same hash with target file
# delete the temporary file
if not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
# atomic operation in the same file system
replace_file('{}.{}'.format(fname, random_uuid), fname)
else:
try:
os.remove('{}.{}'.format(fname, random_uuid))
except OSError:
pass
finally:
warnings.warn(
'File {} exists in file system so the downloaded file is deleted'.format(fname))
if sha1_hash and not sha1sum(fname) == sha1_hash:
raise UserWarning(
'File {} is downloaded but the content hash does not match.'
' The repo may be outdated or download may be incomplete. '
'If the "repo_url" is overridden, consider switching to '
'the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
print('download failed due to {}, retrying, {} attempt{} left'
.format(repr(e), retries, 's' if retries > 1 else ''))
return fname
|
Download a given URL
Parameters
----------
url
URL to download
path
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite
Whether to overwrite destination file if already exists.
sha1_hash
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries
The number of times to attempt the download in case of failure or non 200 return codes
verify_ssl
Verify SSL certificates.
anonymous_credential
Whether to force to use anonymous credential if the path is from S3.
Returns
-------
fname
The file path of the downloaded file.
|
download
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
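Usage note (not part of the source file): a hypothetical call with integrity checking; the URL and sha1 hash below are placeholders, not real artifacts.

fname = download('https://example.com/model.params',
                 path='./model.params',
                 sha1_hash='0123456789abcdef0123456789abcdef01234567',
                 retries=3)
print('Saved to', fname)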
def check_version(min_version: str,
warning_only: bool = False,
library: Optional[ModuleType] = None):
"""Check the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
Minimum version
warning_only
        Whether to print a warning instead of throwing an exception.
library
The target library for version check. Checks gluonnlp by default
"""
# pylint: disable=import-outside-toplevel
from .. import __version__
if library is None:
version = __version__
name = 'GluonNLP'
else:
version = library.__version__
name = library.__name__
from packaging.version import parse
bad_version = parse(version.replace('.dev', '')) < parse(min_version)
if bad_version:
msg = 'Installed {} version {} does not satisfy the ' \
'minimum required version {}'.format(name, version, min_version)
if warning_only:
warnings.warn(msg)
else:
raise AssertionError(msg)
|
Check that the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
Minimum version
warning_only
Whether to print a warning instead of throwing an exception.
library
The target library for version check. Checks gluonnlp by default
|
check_version
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
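A minimal usage sketch for check_version above; the version strings are illustrative and the import path follows the file location listed in the record.
# Usage sketch (illustrative version strings; assumes the import path matches the file above).
import numpy
from gluonnlp.utils.misc import check_version

check_version('1.0.0')                                     # check gluonnlp itself, raise on failure
check_version('1.16.0', warning_only=True, library=numpy)  # check another library, warn instead
|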
def init_comm(backend, gpus):
"""Init communication backend
Parameters
----------
backend
The communication backend
    gpus
        The GPUs to use, given as a comma-separated string of ids such as '0,1'.
        Pass '-1' or an empty string to run on CPU.
Returns
-------
store
The kvstore
num_workers
The total number of workers
    rank
        The rank of the current worker
    local_rank
        The local rank of the current worker
    is_master_node
        Whether the current worker is the master node
    ctx_l
        The list of contexts to run on
"""
# backend specific implementation
import mxnet as mx
if backend == 'horovod':
try:
import horovod.mxnet as hvd # pylint: disable=import-outside-toplevel
except ImportError:
logging.info('horovod must be installed.')
sys.exit(1)
hvd.init()
store = None
num_workers = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
is_master_node = rank == local_rank
ctx_l = [mx.gpu(local_rank)]
logging.info('GPU communication supported by horovod')
else:
store = mx.kv.create(backend)
num_workers = store.num_workers
rank = store.rank
local_rank = 0
is_master_node = rank == local_rank
if gpus == '-1' or gpus == '':
ctx_l = [mx.cpu()]
            logging.info('Running on CPU')
else:
ctx_l = [mx.gpu(int(x)) for x in gpus.split(',')]
logging.info('GPU communication supported by KVStore')
return store, num_workers, rank, local_rank, is_master_node, ctx_l
|
Init communication backend
Parameters
----------
backend
The communication backend
gpus
The GPUs to use, given as a comma-separated string of ids such as '0,1'.
Pass '-1' or an empty string to run on CPU.
Returns
-------
store
The kvstore
num_workers
The total number of workers
rank
The rank of the current worker
local_rank
The local rank of the current worker
is_master_node
Whether the current worker is the master node
ctx_l
The list of contexts to run on
|
init_comm
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
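A hedged call sketch for init_comm above; the 'device' KVStore backend and the GPU string are illustrative choices, not requirements of the function.
# Call sketch (the backend name and GPU string are illustrative).
store, num_workers, rank, local_rank, is_master_node, ctx_l = init_comm('device', '0,1')
# Passing gpus='-1' (or '') selects CPU, so ctx_l would be [mx.cpu()].
|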
def get_mxnet_visible_ctx():
"""Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommended contexts to use for MXNet
"""
import mxnet as mx
num_gpus = mx.context.num_gpus()
if num_gpus == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = [mx.gpu(i) for i in range(num_gpus)]
return ctx_l
|
Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommended contexts to use for MXNet
|
get_mxnet_visible_ctx
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/misc.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py
|
Apache-2.0
|
def __init__(self, params=None):
"""Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Rupert averaging" applied to SGD and was rediscovered in
"Towards Optimal One Pass Large Scale Learning withAveraged Stochastic Gradient Descent"
Wei Xu (2011).
The idea is to average the parameters obtained by stochastic gradient descent.
Parameters
----------
params : ParameterDict
The parameters that we are going to track.
"""
self._track_params = None
self._average_params = None
self._initialized = False
self._n_steps = 0
if params is not None:
self.apply(params)
|
Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Ruppert averaging" applied to SGD and was rediscovered in
"Towards Optimal One Pass Large Scale Learning with Averaged Stochastic Gradient Descent"
Wei Xu (2011).
The idea is to average the parameters obtained by stochastic gradient descent.
Parameters
----------
params : ParameterDict
The parameters that we are going to track.
|
__init__
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
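A minimal NumPy sketch, independent of the tracker class above, showing that the shadow-variable update from the docstring is exactly the running mean of the parameter iterates.
import numpy as np

theta_history = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
v = np.zeros(2)
for t, theta in enumerate(theta_history, start=1):
    v = (1 - 1 / t) * v + (1 / t) * theta            # same update rule as in the docstring
np.testing.assert_allclose(v, np.mean(theta_history, axis=0))   # v equals the arithmetic mean
|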
def apply(self, params):
""" Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
"""
assert self._track_params is None, 'The MovingAverageTracker is already initialized and'\
' is not allowed to be initialized again. '
self._track_params = deduplicate_param_dict(params)
self._n_steps = 0
|
Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
|
apply
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def copy_back(self, params=None):
""" Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
        The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
"""
if params is None:
params = self._track_params
for k, v in self._average_params.items():
params[k].set_data(v)
|
Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
|
copy_back
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def grad_global_norm(parameters: Iterable[Parameter]) -> float:
"""Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`, if `max_norm` if provided.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm = grad_global_norm(net.collect_params().values())
...
Parameters
----------
parameters
The list of Parameters
Returns
-------
total_norm
        Total norm, returned as a Python float.
"""
# Distribute gradients among contexts,
# For example, assume there are 8 weights and four GPUs, we can ask each GPU to
# compute the squared sum of two weights and then add the results together
idx = 0
arrays = defaultdict(list)
sum_norms = []
num_ctx = None
param_uuid_set = set()
for p in parameters:
if p._uuid in param_uuid_set:
continue
param_uuid_set.add(p._uuid)
if p.grad_req != 'null':
p_grads = p.list_grad()
if num_ctx is None:
num_ctx = len(p_grads)
else:
assert num_ctx == len(p_grads)
arrays[idx % num_ctx].append(p_grads[idx % num_ctx])
idx += 1
assert len(arrays) > 0, 'No parameter found available for gradient norm.'
# TODO(sxjscience)
# Investigate the float16 case.
# The inner computation accumulative type of norm should be float32.
ctx = arrays[0][0].context
for idx, arr_l in enumerate(arrays.values()):
sum_norm = mx.np.linalg.norm(mx.np.concatenate([mx.np.ravel(ele) for ele in arr_l]))
sum_norms.append(sum_norm.as_in_ctx(ctx))
# Reduce over ctx
if num_ctx == 1:
total_norm = sum_norms[0]
else:
total_norm = mx.np.linalg.norm(mx.np.concatenate(sum_norms, axis=None))
total_norm = float(total_norm)
return total_norm
|
Calculate the global 2-norm of the gradients of the given parameters.
If gradients exist on more than one context for a parameter, the user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm = grad_global_norm(net.collect_params().values())
...
Parameters
----------
parameters
The list of Parameters
Returns
-------
total_norm
Total norm, returned as a Python float.
|
grad_global_norm
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def clip_grad_global_norm(parameters: Iterable[Parameter],
max_norm: float,
check_isfinite: bool = True) -> Tuple[float, float, bool]:
"""Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
In cases where training happens on multiple contexts, this method should be used in
conjunction with ``trainer.allreduce_grads()`` and ``trainer.update()``.
(**not** ``trainer.step()``)
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size)
...
Parameters
----------
parameters
The list of parameters to calculate the norm
max_norm
If the gradient norm is larger than max_norm, it will be clipped to have max_norm
check_isfinite
If True, check whether the total_norm is finite (not nan or inf).
Returns
-------
total_norm
The total norm
ratio
The expected clipping ratio: grad = grad / ratio
It will be calculated as max(total_norm / max_norm, 1)
is_finite
Whether the total norm is finite
"""
total_norm = grad_global_norm(parameters)
is_finite = bool(np.isfinite(total_norm))
ratio = np.maximum(1, total_norm / max_norm)
if check_isfinite and not is_finite:
warnings.warn(
UserWarning('nan or inf is detected. Clipping results will be undefined.'
' Thus, skip clipping'),
stacklevel=2)
return total_norm, ratio, is_finite
scale = 1 / ratio
param_uuid_set = set()
for p in parameters:
if p._uuid in param_uuid_set:
continue
param_uuid_set.add(p._uuid)
if p.grad_req != 'null':
for arr in p.list_grad():
arr *= scale
return total_norm, ratio, is_finite
|
Rescale the gradients of parameters so that their global 2-norm is smaller than `max_norm`.
If gradients exist on more than one context for a parameter, the user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
In cases where training happens on multiple contexts, this method should be used in
conjunction with ``trainer.allreduce_grads()`` and ``trainer.update()``.
(**not** ``trainer.step()``)
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size)
...
Parameters
----------
parameters
The list of parameters to calculate the norm
max_norm
If the gradient norm is larger than max_norm, it will be clipped to have max_norm
check_isfinite
If True, check whether the total_norm is finite (not nan or inf).
Returns
-------
total_norm
The total norm
ratio
The expected clipping ratio: grad = grad / ratio
It will be calculated as max(total_norm / max_norm, 1)
is_finite
Whether the total norm is finite
|
clip_grad_global_norm
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def move_to_ctx(arr, ctx):
"""Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
"""
if isinstance(arr, tuple):
return tuple(move_to_ctx(ele, ctx) for ele in arr)
elif isinstance(arr, list):
return [move_to_ctx(ele, ctx) for ele in arr]
else:
return None if arr is None else arr.as_in_ctx(ctx)
|
Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
|
move_to_ctx
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def deduplicate_param_dict(param_dict):
"""Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
    dedup_param_dict
        The deduplicated parameter dict in which each shared parameter appears only once
"""
dedup_param_dict = dict()
param_uuid_set = set()
for k in sorted(param_dict.keys()):
v = param_dict[k]
if v._uuid in param_uuid_set:
continue
dedup_param_dict[k] = v
param_uuid_set.add(v._uuid)
return dedup_param_dict
|
Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
dedup_param_dict
The deduplicated parameter dict in which each shared parameter appears only once
|
deduplicate_param_dict
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
def count_parameters(params) -> Tuple[int, int]:
"""
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
        The number of parameters that require gradients
    num_fixed_params
        The number of parameters that do not require gradients
"""
num_params = 0
num_fixed_params = 0
param_uuid_set = set()
for k, v in params.items():
if v._uuid in param_uuid_set:
continue
param_uuid_set.add(v._uuid)
if v.grad_req != 'null':
if v._data is None:
warnings.warn('"{}" is not initialized! The total parameter count '
'will not be correct.'.format(k))
else:
num_params += np.prod(v.shape)
else:
if v._data is None:
warnings.warn('"{}" is not initialized! The total fixed parameter count '
'will not be correct.'.format(k))
else:
num_fixed_params += np.prod(v.shape)
return num_params, num_fixed_params
|
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
The number of parameters that require gradients
num_fixed_params
The number of parameters that do not require gradients
|
count_parameters
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/parameter.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py
|
Apache-2.0
|
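A small sketch of how count_parameters above might be used, assuming MXNet 2.x Gluon parameters (which carry the `_uuid` attribute the function relies on); the layer sizes are illustrative.
# Sketch: count trainable vs. fixed parameters of an initialized block (sizes are illustrative).
import mxnet as mx

net = mx.gluon.nn.Dense(16, in_units=8)
net.initialize()
net.weight.grad_req = 'null'      # freeze the weight so it is counted as fixed
num_params, num_fixed_params = count_parameters(net.collect_params())
# Expected: num_params == 16 (bias) and num_fixed_params == 128 (frozen 16x8 weight)
|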
def get_trimmed_lengths(lengths: List[int],
max_length: int,
do_merge: bool = False) -> np.ndarray:
"""Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
Make sure that sum(trimmed_lengths) <= max_length.
The strategy is to always try to trim the longer lengths.
- do_merge is False
Make sure that all(trimmed_lengths <= max_length)
Parameters
----------
lengths
The original lengths of each sample
max_length
When do_merge is True,
We set the max_length constraint on the total length.
When do_merge is False,
We set the max_length constraint on individual sentences.
do_merge
Whether these sentences will be merged
Returns
-------
trimmed_lengths
The trimmed lengths of the sequences.
"""
lengths = np.array(lengths)
if do_merge:
total_length = sum(lengths)
if total_length <= max_length:
return lengths
trimmed_lengths = np.zeros_like(lengths)
while sum(trimmed_lengths) != max_length:
remainder = max_length - sum(trimmed_lengths)
budgets = lengths - trimmed_lengths
nonzero_idx = (budgets > 0).nonzero()[0]
nonzero_budgets = budgets[nonzero_idx]
if remainder // len(nonzero_idx) == 0:
for i in range(remainder):
trimmed_lengths[nonzero_idx[i]] += 1
else:
increment = min(min(nonzero_budgets), remainder // len(nonzero_idx))
trimmed_lengths[nonzero_idx] += increment
return trimmed_lengths
else:
return np.minimum(lengths, max_length)
|
Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
Make sure that sum(trimmed_lengths) <= max_length.
The strategy is to always try to trim the longer lengths.
- do_merge is False
Make sure that all(trimmed_lengths <= max_length)
Parameters
----------
lengths
The original lengths of each sample
max_length
When do_merge is True,
We set the max_length constraint on the total length.
When do_merge is False,
We set the max_length constraint on individual sentences.
do_merge
Whether these sentences will be merged
Returns
-------
trimmed_lengths
The trimmed lengths of the sequences.
|
get_trimmed_lengths
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/preprocessing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py
|
Apache-2.0
|
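A worked example for the trimming logic of get_trimmed_lengths above; the lengths and budgets are illustrative.
# Worked example: a shared budget of 10 tokens across three segments to be merged.
print(get_trimmed_lengths([8, 3, 1], max_length=10, do_merge=True))   # -> [6 3 1]
# Without merging, each segment is simply capped at max_length.
print(get_trimmed_lengths([8, 3, 1], max_length=5, do_merge=False))   # -> [5 3 1]
|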
def match_tokens_with_char_spans(token_offsets: np.ndarray,
spans: np.ndarray) -> np.ndarray:
"""Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
span[1] = min(span[1], token_offsets[-1, 1])
2: Find start + end
We try to select the smallest number of tokens that cover the entity, i.e.,
we will find start + end, in which tokens[start:end + 1] covers the span.
We will use the following algorithm:
For "start", we search for
token_offsets[start, 0] <= span[0] < token_offsets[start + 1, 0]
For "end", we search for:
token_offsets[end - 1, 1] < spans[1] <= token_offsets[end, 1]
Parameters
----------
token_offsets
The offsets of the input tokens. Must be sorted.
That is, it will satisfy
1. token_offsets[i][0] <= token_offsets[i][1]
2. token_offsets[i][0] <= token_offsets[i + 1][0]
3. token_offsets[i][1] <= token_offsets[i + 1][1]
Shape (#num_tokens, 2)
spans
The character-level offsets (begin/end) of the selected spans.
Shape (#spans, 2)
Returns
-------
token_start_ends
The token-level starts and ends. The end will also be used.
Shape (#spans, 2)
"""
if not isinstance(token_offsets, np.ndarray):
token_offsets = np.array(token_offsets)
if not isinstance(spans, np.ndarray):
spans = np.array(spans)
offsets_starts = token_offsets[:, 0]
offsets_ends = token_offsets[:, 1]
span_char_starts = spans[:, 0]
span_char_ends = spans[:, 1]
# Truncate the span
span_char_starts = np.maximum(offsets_starts[0], span_char_starts)
span_char_ends = np.minimum(offsets_ends[-1], span_char_ends)
# Search for valid start + end
span_token_starts = np.searchsorted(offsets_starts, span_char_starts, side='right') - 1
span_token_ends = np.searchsorted(offsets_ends, span_char_ends, side='left')
return np.concatenate((np.expand_dims(span_token_starts, axis=-1),
np.expand_dims(span_token_ends, axis=-1)), axis=-1)
|
Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
span[1] = min(span[1], token_offsets[-1, 1])
2: Find start + end
We try to select the smallest number of tokens that cover the entity, i.e.,
we will find start + end, in which tokens[start:end + 1] covers the span.
We will use the following algorithm:
For "start", we search for
token_offsets[start, 0] <= span[0] < token_offsets[start + 1, 0]
For "end", we search for:
token_offsets[end - 1, 1] < spans[1] <= token_offsets[end, 1]
Parameters
----------
token_offsets
The offsets of the input tokens. Must be sorted.
That is, it will satisfy
1. token_offsets[i][0] <= token_offsets[i][1]
2. token_offsets[i][0] <= token_offsets[i + 1][0]
3. token_offsets[i][1] <= token_offsets[i + 1][1]
Shape (#num_tokens, 2)
spans
The character-level offsets (begin/end) of the selected spans.
Shape (#spans, 2)
Returns
-------
token_start_ends
The token-level starts and ends. The end will also be used.
Shape (#spans, 2)
|
match_tokens_with_char_spans
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/preprocessing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py
|
Apache-2.0
|
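A worked example for the span-matching rule of match_tokens_with_char_spans above; the offsets are illustrative.
# Worked example: three tokens covering characters [0, 5), [6, 11) and [12, 15).
token_offsets = [[0, 5], [6, 11], [12, 15]]
spans = [[7, 14], [0, 4]]
print(match_tokens_with_char_spans(token_offsets, spans))
# -> [[1 2]
#     [0 0]]   i.e. the first span maps to tokens 1..2, the second to token 0 only.
|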
def register(self, *args):
"""
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
"""
if len(args) == 2:
# Register an object with nick name by function call
nickname, obj = args
self._do_register(nickname, obj)
elif len(args) == 1:
if isinstance(args[0], str):
# Register an object with nick name by decorator
nickname = args[0]
def deco(func_or_class: object) -> object:
self._do_register(nickname, func_or_class)
return func_or_class
return deco
else:
# Register an object by function call
self._do_register(args[0].__name__, args[0])
elif len(args) == 0:
# Register an object by decorator
def deco(func_or_class: object) -> object:
self._do_register(func_or_class.__name__, func_or_class)
return func_or_class
return deco
else:
            raise ValueError('Unsupported usage!')
|
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
|
register
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/registry.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py
|
Apache-2.0
|
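A hypothetical usage sketch for the register method above, assuming it belongs to the Registry class in the same module and that the registry is constructed with a name; MODEL_REGISTRY, MyModel and TinyModel are made-up names.
# Hypothetical usage (assumes the enclosing Registry class; names below are made up).
MODEL_REGISTRY = Registry('model')

@MODEL_REGISTRY.register()          # registered under the class name 'MyModel'
class MyModel:
    pass

@MODEL_REGISTRY.register('tiny')    # registered under the nickname 'tiny'
class TinyModel:
    pass

MODEL_REGISTRY.register('alias', MyModel)   # plain function-call registration
|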
def create(self, name: str, *args, **kwargs) -> object:
"""Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
"""
obj = self.get(name)
try:
return obj(*args, **kwargs)
except Exception as exp:
print('Cannot create name="{}" --> {} with the provided arguments!\n'
' args={},\n'
' kwargs={},\n'
.format(name, obj, args, kwargs))
raise exp
|
Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
|
create
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/registry.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py
|
Apache-2.0
|
def serialize(path, tbl):
"""Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
    storage (such as numpy arrays) implement the pickle protocol version 5
    pickle.PickleBuffer type in __reduce_ex__, then this function can store
    these buffers out-of-band as files in `path` so that they can subsequently be
    re-used for zero-copy sharing across processes.
Parameters
----------
path : pathlib.Path
        Empty folder used to save serialized data. Usually a folder under /dev/shm
tbl : object
Object to serialize. For example a PyArrow Table, a Pandas Dataframe or
any type that relies on NumPy to store the binary data.
"""
idx = 0
def buffer_callback(buf):
nonlocal idx
with open(path / f'{idx}.bin', 'wb') as f:
f.write(buf)
idx += 1
with open(path / 'meta.pkl', 'wb') as f:
pickle.dump(tbl, f, protocol=5, buffer_callback=buffer_callback)
|
Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
storage (such as numpy arrays) implement the pickle protocol version 5
pickle.PickleBuffer type in __reduce_ex__, then this function can store
these buffers out-of-band as files in `path` so that they can subsequently be
re-used for zero-copy sharing across processes.
Parameters
----------
path : pathlib.Path
Empty folder used to save serialized data. Usually a folder under /dev/shm
tbl : object
Object to serialize. For example a PyArrow Table, a Pandas Dataframe or
any type that relies on NumPy to store the binary data.
|
serialize
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/shm.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py
|
Apache-2.0
|
def load(path):
"""Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
        Folder used to save serialized data with serialize(). Usually a folder under /dev/shm
"""
    num_buffers = len(list(path.iterdir())) - 1  # exclude meta.pkl
buffers = []
for idx in range(num_buffers):
f = open(path / f'{idx}.bin', 'rb')
buffers.append(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ))
with open(path / 'meta.pkl', 'rb') as f:
return pickle.load(f, buffers=buffers)
|
Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
Folder used to save serialized data with serialize(). Usually a folder under /dev/shm
|
load
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/shm.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py
|
Apache-2.0
|
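A round-trip sketch combining serialize() and load() above; it assumes Linux (/dev/shm) and Python 3.8+ for pickle protocol 5, and the folder name and payload are illustrative.
# Round-trip sketch (Linux-only; folder name and payload are illustrative).
import pathlib
import numpy as np

shm_dir = pathlib.Path('/dev/shm/example_table')
shm_dir.mkdir(parents=True, exist_ok=True)        # should contain nothing but what serialize() writes
serialize(shm_dir, {'features': np.arange(10)})
restored = load(shm_dir)                          # out-of-band buffers are mmap'ed, not copied
assert (restored['features'] == np.arange(10)).all()
|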
def is_match_states_batch_size(states, states_batch_axis, batch_size) -> bool:
"""Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size
Returns
-------
ret
"""
if states_batch_axis is None:
return True
if isinstance(states_batch_axis, int):
if states.shape[states_batch_axis] == batch_size:
return True
for ele_states_batch_axis, ele_states in zip(states_batch_axis, states):
ret = is_match_states_batch_size(ele_states, ele_states_batch_axis, batch_size)
if ret is False:
return False
return True
|
Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size
Returns
-------
ret
|
is_match_states_batch_size
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/testing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py
|
Apache-2.0
|
def verify_nmt_model(model, batch_size: int = 4,
src_seq_length: int = 5,
tgt_seq_length: int = 10,
atol: float = 1E-4,
rtol: float = 1E-3):
"""Verify the correctness of an NMT model. Raise error message if it detects problems.
Parameters
----------
model
The machine translation model
batch_size
The batch size to test the nmt model
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance.
rtol
Relative tolerance.
"""
src_word_sequence = mx.np.random.randint(0, model.src_vocab_size, (batch_size, src_seq_length))
tgt_word_sequence = mx.np.random.randint(0, model.tgt_vocab_size, (batch_size, tgt_seq_length))
src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
min_tgt_seq_length = max(1, tgt_seq_length - 5)
tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
if model.layout == 'NT':
full_out = model(src_word_sequence, src_valid_length, tgt_word_sequence, tgt_valid_length)
else:
full_out = model(src_word_sequence.T, src_valid_length,
tgt_word_sequence.T, tgt_valid_length)
full_out = mx.np.swapaxes(full_out, 0, 1)
if full_out.shape != (batch_size, tgt_seq_length, model.tgt_vocab_size):
raise AssertionError('The output of NMT model does not match the expected output.'
' Model output shape = {}, Expected (B, T, V) = {}'
.format(full_out.shape,
(batch_size, tgt_seq_length, model.tgt_vocab_size)))
for partial_batch_size in range(1, batch_size + 1):
for i in range(1, min_tgt_seq_length):
if model.layout == 'NT':
partial_out = model(src_word_sequence[:partial_batch_size, :],
src_valid_length[:partial_batch_size],
tgt_word_sequence[:partial_batch_size, :(-i)],
tgt_valid_length[:partial_batch_size]
- mx.np.array(i, dtype=tgt_valid_length.dtype))
else:
partial_out = model(src_word_sequence[:partial_batch_size, :].T,
src_valid_length[:partial_batch_size],
tgt_word_sequence[:partial_batch_size, :(-i)].T,
tgt_valid_length[:partial_batch_size]
- mx.np.array(i, dtype=tgt_valid_length.dtype))
partial_out = mx.np.swapaxes(partial_out, 0, 1)
# Verify that the partial output matches the full output
for b in range(partial_batch_size):
partial_vl = tgt_valid_length.asnumpy()[b] - i
npt.assert_allclose(full_out[b, :partial_vl].asnumpy(),
partial_out[b, :partial_vl].asnumpy(), atol, rtol)
|
Verify the correctness of an NMT model. Raises an error if it detects problems.
Parameters
----------
model
The machine translation model
batch_size
The batch size to test the nmt model
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance.
rtol
Relative tolerance.
|
verify_nmt_model
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/testing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py
|
Apache-2.0
|
def verify_nmt_inference(train_model, inference_model,
batch_size=4, src_seq_length=5,
tgt_seq_length=10, atol=1E-4, rtol=1E-3):
"""Verify the correctness of an NMT inference model. Raise error message if it detects
any problems.
Parameters
----------
train_model
The training model
inference_model
The inference model
batch_size
Batch size
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance
rtol
Relative tolerance
"""
if train_model.layout == 'NT':
src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
(batch_size, src_seq_length))
tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
(batch_size, tgt_seq_length))
else:
src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
(src_seq_length, batch_size))
tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
(tgt_seq_length, batch_size))
src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
min_tgt_seq_length = max(1, tgt_seq_length - 5)
tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
full_out = train_model(src_word_sequences, src_valid_length,
tgt_word_sequences, tgt_valid_length)
if train_model.layout == 'NT':
for partial_batch_size in range(1, batch_size + 1):
step_out_l = []
states = inference_model.init_states(src_word_sequences[:partial_batch_size, :],
src_valid_length[:partial_batch_size])
assert is_match_states_batch_size(states, inference_model.state_batch_axis,
partial_batch_size)
for i in range(min_tgt_seq_length):
step_out, states = inference_model(tgt_word_sequences[:partial_batch_size, i],
states)
step_out_l.append(step_out)
partial_out = mx.np.stack(step_out_l, axis=1)
npt.assert_allclose(full_out[:partial_batch_size, :min_tgt_seq_length].asnumpy(),
partial_out[:partial_batch_size, :].asnumpy(), atol, rtol)
elif train_model.layout == 'TN':
for partial_batch_size in range(1, batch_size + 1):
step_out_l = []
states = inference_model.init_states(src_word_sequences[:, :partial_batch_size],
src_valid_length[:partial_batch_size])
assert is_match_states_batch_size(states, inference_model.state_batch_axis,
partial_batch_size)
for i in range(min_tgt_seq_length):
step_out, states = inference_model(tgt_word_sequences[i, :partial_batch_size],
states)
step_out_l.append(step_out)
partial_out = mx.np.stack(step_out_l, axis=0)
npt.assert_allclose(full_out[:min_tgt_seq_length, :partial_batch_size].asnumpy(),
partial_out[:, :partial_batch_size].asnumpy(), atol, rtol)
else:
raise NotImplementedError
|
Verify the correctness of an NMT inference model. Raises an error if it detects
any problems.
Parameters
----------
train_model
The training model
inference_model
The inference model
batch_size
Batch size
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance
rtol
Relative tolerance
|
verify_nmt_inference
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/testing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py
|
Apache-2.0
|
def _cast_nested_to_fp16(nested_dat):
"""Cast the nested input to fp16
Parameters
----------
    nested_dat
The input nested data structure
Returns
-------
output
The casted output data
"""
if isinstance(nested_dat, (mx.np.ndarray, np.ndarray)):
if nested_dat.dtype == np.float32:
return nested_dat.astype(np.float16)
else:
return nested_dat
elif isinstance(nested_dat, list):
return [_cast_nested_to_fp16(ele) for ele in nested_dat]
elif isinstance(nested_dat, tuple):
return tuple([_cast_nested_to_fp16(ele) for ele in nested_dat])
else:
raise NotImplementedError('Type is not supported!')
|
Cast the nested input to fp16
Parameters
----------
nested_dat
The input nested data structure
Returns
-------
output
The casted output data
|
_cast_nested_to_fp16
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/testing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py
|
Apache-2.0
|
def verify_backbone_fp16(model_cls, cfg, ctx, inputs,
atol=1E-2, rtol=1E-2, check_amp=True):
"""Test whether the backbone model has the comparable parameter gradient +
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
The context
inputs
        The input tensors of the model. They will be fed to both the float32 and the float16 model.
atol
The absolute tolerance
rtol
The relative tolerance
check_amp
Whether to check the AMP process. You will need to ensure that there is no
randomness in the model when it is turned on.
"""
model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
model_fp32.initialize(ctx=ctx)
model_fp32.hybridize()
# Check forward
fp32_inputs = move_to_ctx(inputs, ctx=ctx)
outputs_fp32 = model_fp32(*fp32_inputs)
mx.npx.waitall()
# Check forward of fp16
model_fp16 = model_cls.from_cfg(cfg, dtype='float16')
model_fp16.share_parameters(model_fp32.collect_params())
model_fp16.cast('float16')
model_fp16.hybridize()
for param in model_fp16.collect_params().values():
assert param.dtype == 'float16'
fp16_inputs = move_to_ctx(_cast_nested_to_fp16(inputs), ctx=ctx)
outputs_fp16 = model_fp16(*fp16_inputs)
mx.npx.waitall()
_match_struct_output(outputs_fp16, outputs_fp32, atol=atol, rtol=rtol)
if check_amp:
from mxnet import amp
amp.init()
# Reconstruct the fp32 model
model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
model_fp32.initialize(ctx=ctx)
model_fp32.hybridize()
trainer = mx.gluon.Trainer(model_fp32.collect_params(), 'adam',
{'learning_rate': 1E-3, 'wd': 1E-4,
'multi_precision': True},
update_on_kvstore=False)
amp.init_trainer(trainer)
with mx.autograd.record():
outputs_amp = model_fp32(*fp32_inputs)
if not isinstance(outputs_amp, (tuple, list)):
loss = outputs_amp.mean()
else:
loss = sum([ele.mean() for ele in outputs_amp])
with amp.scale_loss(loss, trainer) as scaled_loss:
mx.autograd.backward(scaled_loss)
trainer.step(1)
mx.npx.waitall()
|
Test whether the backbone model produces comparable outputs in float16, and optionally whether one AMP training step runs.
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
The context
inputs
The input tensors of the model. They will be fed to both the float32 and the float16 model.
atol
The absolute tolerance
rtol
The relative tolerance
check_amp
Whether to check the AMP process. You will need to ensure that there is no
randomness in the model when it is turned on.
|
verify_backbone_fp16
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/testing.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py
|
Apache-2.0
|
def get_ec2_tvm_flags() -> Dict[str, Dict]:
r"""Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that contains the mapping between instance type and the
corresponding compilation flags.
Each element includes:
- target
The compilation target
- use_gpu
Whether it's a GPU instance
- opt_level
The optimization level in compilation
- pass
Additional graph passes for further improvement.
"""
instance_info = {
'g4': {'target': "cuda -model=t4 -libs=cublas,cudnn",
'use_gpu': True,
'opt_level': 3,
'required_pass': ["FastMath"]},
'c4': {'target': 'llvm -mcpu=core-avx2 -libs=cblas',
'use_gpu': False,
'opt_level': 3,
'required_pass': ["FastMath"]},
'c5': {'target': 'llvm -mcpu=skylake-avx512 -libs=cblas',
'use_gpu': False,
'opt_level': 3,
'required_pass': ["FastMath"]},
'p3': {'target': 'cuda -model=v100 -libs=cublas,cudnn',
'use_gpu': True,
'opt_level': 3,
'required_pass': ["FastMath"]}
}
return instance_info
|
Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that contains the mapping between instance type and the
corresponding compilation flags.
Each element includes:
- target
The compilation target
- use_gpu
Whether it's a GPU instance
- opt_level
The optimization level in compilation
- pass
Additional graph passes for further improvement.
|
get_ec2_tvm_flags
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/tvm_utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py
|
Apache-2.0
|
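A small lookup sketch for the helper above.
# Sketch: look up the compilation flags for one instance family.
flags = get_ec2_tvm_flags()['g4']
print(flags['target'])                               # 'cuda -model=t4 -libs=cublas,cudnn'
print(flags['opt_level'], flags['required_pass'])    # 3 ['FastMath']
|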
def update_tvm_convert_map() -> None:
"""A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py"""
op = (('masked_softmax', _mx_masked_softmax),)
_convert_map.update({key: value for key, value in op})
|
A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py
|
update_tvm_convert_map
|
python
|
dmlc/gluon-nlp
|
src/gluonnlp/utils/tvm_utils.py
|
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py
|
Apache-2.0
|
def test_test():
"""Test that fixing a random seed works."""
py_rnd = random.randint(0, 100)
np_rnd = np.random.randint(0, 100)
mx_rnd = mx.nd.random_uniform(shape=(1, )).asscalar()
random.seed(1)
mx.random.seed(1)
np.random.seed(1)
assert py_rnd == random.randint(0, 100)
assert np_rnd == np.random.randint(0, 100)
assert mx_rnd == mx.nd.random_uniform(shape=(1, )).asscalar()
|
Test that fixing a random seed works.
|
test_test
|
python
|
dmlc/gluon-nlp
|
tests/test_pytest.py
|
https://github.com/dmlc/gluon-nlp/blob/master/tests/test_pytest.py
|
Apache-2.0
|
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
|
Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
|
is_image_file
|
python
|
ajbrock/BigGAN-PyTorch
|
datasets.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py
|
MIT
|
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size(), target)
return img, int(target)
|
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
|
__getitem__
|
python
|
ajbrock/BigGAN-PyTorch
|
datasets.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py
|
MIT
|
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
|
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
|
__getitem__
|
python
|
ajbrock/BigGAN-PyTorch
|
datasets.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py
|
MIT
|
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze()
|
Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
|
torch_cov
|
python
|
ajbrock/BigGAN-PyTorch
|
inception_utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py
|
MIT
|
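A sanity-check sketch comparing torch_cov above with NumPy; note that torch_cov modifies its input in place (`m -= ...`), hence the clone(). Tolerances are illustrative for float32 inputs.
# Sanity check against np.cov (tolerances are illustrative).
import numpy as np
import torch

x = torch.randn(100, 3)              # 100 observations of 3 variables
cov = torch_cov(x.clone())           # clone() because torch_cov mutates its input
np.testing.assert_allclose(cov.numpy(), np.cov(x.numpy(), rowvar=False),
                           rtol=1e-4, atol=1e-4)
|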
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
print('wat')
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
out = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
return out
|
Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2   : The sample mean over activations, precalculated on a
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on a
representative data set.
Returns:
-- : The Frechet Distance.
|
numpy_calculate_frechet_distance
|
python
|
ajbrock/BigGAN-PyTorch
|
inception_utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py
|
MIT
|
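A quick sanity-check sketch for the function above: the distance from a Gaussian to itself is ~0, and for equal isotropic covariances it reduces to ||mu_1 - mu_2||^2.
import numpy as np

mu, sigma = np.zeros(4), np.eye(4)
print(numpy_calculate_frechet_distance(mu, sigma, mu, sigma))   # ~0.0
mu2 = np.array([3.0, 0.0, 0.0, 0.0])
print(numpy_calculate_frechet_distance(mu, sigma, mu2, sigma))  # ~9.0 == ||mu - mu2||^2
|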
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out
|
Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2   : The sample mean over activations, precalculated on a
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on a
representative data set.
Returns:
-- : The Frechet Distance.
|
torch_calculate_frechet_distance
|
python
|
ajbrock/BigGAN-PyTorch
|
inception_utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py
|
MIT
|
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(low=0,high=img.size[0] - size[0]))
j = (0 if size[1] == img.size[1]
else np.random.randint(low=0,high=img.size[1] - size[1]))
return transforms.functional.crop(img, i, j, size[0], size[1])
|
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
|
__call__
|
python
|
ajbrock/BigGAN-PyTorch
|
utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py
|
MIT
|
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n')
|
Assumption: no newlines in the input.
|
log
|
python
|
ajbrock/BigGAN-PyTorch
|
utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py
|
MIT
|
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
"""
total = total or len(items)
t_start = time.time()
t_last = 0
for n, item in enumerate(items):
t_now = time.time()
if t_now - t_last > min_delay:
print("\r%s%d/%d (%6.2f%%)" % (
desc, n+1, total, n / float(total) * 100), end=" ")
if n > 0:
if displaytype == 's1k': # minutes/seconds for 1000 iters
next_1000 = n + (1000 - n%1000)
t_done = t_now - t_start
t_1k = t_done / n * next_1000
outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))
print("(TE/ET1k: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
else:# displaytype == 'eta':
t_done = t_now - t_start
t_total = t_done / n * total
outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))
print("(TE/ETA: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
sys.stdout.flush()
t_last = t_now
yield item
t_total = time.time() - t_start
print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
divmod(t_total, 60)))
|
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
|
progress
|
python
|
ajbrock/BigGAN-PyTorch
|
utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py
|
MIT
|
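A usage sketch for the progress generator above; per its docstring, `desc` should end with a space, and the sleep only stands in for real work so the timing output is visible.
import time

for item in progress(range(5), desc='toy loop ', displaytype='eta'):
    time.sleep(0.01)    # stand-in for real work
|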
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Fp32 copy of the weights
state['fp32_p'] = p.data.float()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], state['fp32_p'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
state['fp32_p'].addcdiv_(-step_size, exp_avg, denom)
p.data = state['fp32_p'].half()
return loss
|
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
|
step
|
python
|
ajbrock/BigGAN-PyTorch
|
utils.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py
|
MIT
|
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
# print('a')
# print(type(sum_), type(ssum), type(sum_size), sum_.shape, ssum.shape, sum_size)
# broadcasted = Broadcast.apply(target_gpus, sum_, ssum, torch.tensor(sum_size).float().to(sum_.device))
# print('b')
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
# outputs.append((rec[0], _MasterMessage(*broadcasted[i*3:i*3+3])))
return outputs
|
Reduce the sum and square-sum, compute the statistics, and broadcast it.
|
_data_parallel_master
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/batchnorm.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py
|
MIT
|
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, torch.rsqrt(bias_var + self.eps)
# return mean, bias_var.clamp(self.eps) ** -0.5
|
Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device.
|
_compute_mean_std
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/batchnorm.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py
|
MIT
|
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
|
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
|
__init__
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/comm.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py
|
MIT
|
def register_slave(self, identifier):
"""
Register an slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
|
Register an slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
|
register_slave
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/comm.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py
|
MIT
|
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback will be invoked to compute the message to be sent back to each device
        (including the master device).
        Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
assert results[0][0] == 0, 'The first result should belongs to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
|
Main entry for the master device in each forward pass.
The messages are first collected from each device (including the master device), and then
a callback is invoked to compute the message to be sent back to each device
(including the master device).
Args:
master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
|
run_master
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/comm.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py
|
MIT
|
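Taken together with `register_slave`, the control flow can be sketched as follows. This assumes the `SyncMaster` / `SlavePipe` classes from sync_batchnorm/comm.py are importable; the callback and the integer messages are invented purely to show the handshake (real use passes per-device batch statistics):

import threading
from sync_batchnorm.comm import SyncMaster   # assumes the repo is on the path

def _toy_callback(intermediates):
    # intermediates: [(copy_id, msg), ...]; copy_id 0 is always the master.
    total = sum(msg for _, msg in intermediates)
    return [(copy_id, total) for copy_id, _ in intermediates]

master = SyncMaster(_toy_callback)
pipes = [master.register_slave(i) for i in (1, 2)]   # one pipe per slave device

def _slave(pipe, value):
    # Each slave submits its partial message and blocks until the master replies.
    print('slave received', pipe.run_slave(value))

threads = [threading.Thread(target=_slave, args=(p, v)) for p, v in zip(pipes, (1, 2))]
for t in threads:
    t.start()
print('master received', master.run_master(10))      # 10 + 1 + 2 -> 13 on every device
for t in threads:
    t.join()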
def execute_replication_callbacks(modules):
    """
    Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
    Note that, as all modules are isomorphic, we assign each sub-module with a context
    (shared among multiple copies of this module on different devices).
    Through this context, different copies can share some information.
    We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
    of any slave copies.
    """
    master_copy = modules[0]
    nr_modules = len(list(master_copy.modules()))
    ctxs = [CallbackContext() for _ in range(nr_modules)]
    for i, module in enumerate(modules):
        for j, m in enumerate(module.modules()):
            if hasattr(m, '__data_parallel_replicate__'):
                m.__data_parallel_replicate__(ctxs[j], i)
|
Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
Note that, as all modules are isomorphic, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
|
execute_replication_callbacks
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/replicate.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py
|
MIT
|
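A hypothetical module that opts into this callback might look like the sketch below; the hook name matches the attribute scanned for above, everything else is invented for illustration (assumes torch and the repo's replicate.py are importable):

import torch.nn as nn
from sync_batchnorm.replicate import execute_replication_callbacks

class _ToyLayer(nn.Module):
    def __data_parallel_replicate__(self, ctx, copy_id):
        # `ctx` is shared by every copy of this sub-module; copy 0 is the master.
        self._shared_ctx = ctx
        self._is_master = (copy_id == 0)

    def forward(self, x):
        return x

# Stand-ins for the replicas that DataParallel.replicate would create.
replicas = [nn.Sequential(_ToyLayer()) for _ in range(2)]
execute_replication_callbacks(replicas)
assert replicas[0][0]._is_master and not replicas[1][0]._is_master
assert replicas[0][0]._shared_ctx is replicas[1][0]._shared_ctx   # same context object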
def patch_replication_callback(data_parallel):
    """
    Monkey-patch an existing `DataParallel` object. Add the replication callback.
    Useful when you have a customized `DataParallel` implementation.
    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    old_replicate = data_parallel.replicate

    @functools.wraps(old_replicate)
    def new_replicate(module, device_ids):
        modules = old_replicate(module, device_ids)
        execute_replication_callbacks(modules)
        return modules

    data_parallel.replicate = new_replicate
|
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
|
patch_replication_callback
|
python
|
ajbrock/BigGAN-PyTorch
|
sync_batchnorm/replicate.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py
|
MIT
|
def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):
    """Loads TFHub weights and saves them to an intermediate HDF5 file.
    Args:
        module_path ([Path-like]): Path to TFHub module.
        hdf5_path ([Path-like]): Path to output HDF5 file.
    Returns:
        [h5py.File]: Loaded hdf5 file containing module weights.
    """
    if os.path.exists(hdf5_path) and (not redownload):
        print('Loading BigGAN hdf5 file from:', hdf5_path)
        return h5py.File(hdf5_path, 'r')
    print('Loading BigGAN module from:', module_path)
    tf.reset_default_graph()
    hub.Module(module_path)
    print('Loaded BigGAN module from:', module_path)
    initializer = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(initializer)
    print('Saving BigGAN weights to:', hdf5_path)
    h5f = h5py.File(hdf5_path, 'w')
    for var in tf.global_variables():
        val = sess.run(var)
        h5f.create_dataset(var.name, data=val)
        print(f'Saving {var.name} with shape {val.shape}')
    h5f.close()
    return h5py.File(hdf5_path, 'r')
|
Loads TFHub weights and saves them to an intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weights.
|
dump_tfhub_to_hdf5
|
python
|
ajbrock/BigGAN-PyTorch
|
TFHub/converter.py
|
https://github.com/ajbrock/BigGAN-PyTorch/blob/master/TFHub/converter.py
|
MIT
|
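A hypothetical invocation of the converter; this needs a TF1 + tensorflow_hub environment, and the TFHub handle and output path below are illustrative only:

# Assumes dump_tfhub_to_hdf5 (above) is in scope.
h5f = dump_tfhub_to_hdf5('https://tfhub.dev/deepmind/biggan-128/2', 'biggan-128.h5')

def _show(name, obj):
    # Datasets are keyed by the original TF variable names (nested as HDF5 groups).
    if hasattr(obj, 'shape'):
        print(name, obj.shape)

h5f.visititems(_show)
h5f.close()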
def read_img(t_imgfname, input_size, img_mean):  # optional pre-processing arguments
    """Read one image with optional resizing and mean subtraction.
    Args:
        t_imgfname: tensor holding the path to the image file.
        input_size: a tuple with (height, width) values used for resizing.
            If not given, the image is resized to twice its original size.
        img_mean: vector of mean colour values subtracted from the (BGR) image.
    Returns:
        Two tensors: the resized image and the mean-subtracted original image.
    """
    img_contents = tf.read_file(t_imgfname)
    # img = tf.image.decode_image(img_contents, channels=3)
    img = tf.image.decode_png(img_contents, channels=3)
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
    img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
    # Extract mean.
    img -= img_mean
    if input_size is not None:
        h, w = input_size
        # Resize to the given (height, width).
        newshape = tf.squeeze(tf.stack([h, w]), squeeze_dims=[1])
        img2 = tf.image.resize_images(img, newshape)
    else:
        img2 = tf.image.resize_images(img, tf.shape(img)[0:2,] * 2)
    return img2, img
|
Read one image with optional resizing and mean subtraction.
Args:
    t_imgfname: tensor holding the path to the image file.
    input_size: a tuple with (height, width) values used for resizing.
        If not given, the image is resized to twice its original size.
    img_mean: vector of mean colour values subtracted from the (BGR) image.
Returns:
    Two tensors: the resized image and the mean-subtracted original image.
|
read_img
|
python
|
iyah4888/SIGGRAPH18SSS
|
main_hyper.py
|
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/main_hyper.py
|
MIT
|
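A minimal TF1-style usage sketch for `read_img`; the file name is made up, and IMG_MEAN is the commonly used BGR ImageNet mean rather than a value taken from this file:

import numpy as np
import tensorflow as tf   # TF1 graph/session API, as used above

IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

t_fname = tf.constant('example.png')                      # assumed local test image
img_resized, img_orig = read_img(t_fname, input_size=None, img_mean=IMG_MEAN)

with tf.Session() as sess:
    resized, original = sess.run([img_resized, img_orig])
    # With input_size=None the image is resized to twice its original height and width.
    print(original.shape, '->', resized.shape)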