Dataset columns — code: string (66 to 870k chars) · docstring: string (19 to 26.7k chars) · func_name: string (1 to 138 chars) · language: 1 class · repo: string (7 to 68 chars) · path: string (5 to 324 chars) · url: string (46 to 389 chars) · license: 7 classes
def process_mono_corpus(self,
                        corpus_paths: List[str],
                        out_path: str,
                        chunk_size: int = 1024 * 1024,
                        num_process: int = 8) -> int:
    """Preprocess the mono corpus

    Parameters
    ----------
    corpus_paths
        Corpus paths
    out_path
        Write the results to the output path
    chunk_size
        Approximately split the corpus files into multiple chunks
    num_process
        The number of processes

    Returns
    -------
    line_count
        The number of lines in the final filtered file
    """
    start = time.time()
    total_line_count = 0
    filtered_line_count = 0

    def chunk_iterator(step=10):
        for path in corpus_paths:
            line_pos = get_line_byte_start(path)
            line_size = line_pos[1:] - line_pos[:-1]
            num_lines = line_pos.shape[0] - 1
            budget = chunk_size
            chunk_start = 0
            cur_chunk_size = 0
            for i in range(0, num_lines, step):
                line_batch_num = min(num_lines - i, step)
                batch_line_size = line_size[i:(i + line_batch_num)].sum()
                budget -= batch_line_size
                cur_chunk_size += batch_line_size
                if budget <= 0 or i + step >= num_lines:
                    yield path, chunk_start, cur_chunk_size
                    chunk_start += cur_chunk_size
                    cur_chunk_size = 0
                    budget = chunk_size

    with open(out_path, 'w', encoding='utf-8', newline='\n') as out_f:
        with multiprocessing.Pool(num_process) as pool:
            for i, (processed_lines, unfiltered_line_num) in \
                    enumerate(pool.imap(self.process_chunk, chunk_iterator())):
                out_f.write('\n'.join(processed_lines) + '\n')
                filtered_line_count += len(processed_lines)
                total_line_count += unfiltered_line_num
                if (i + 1) % 100 == 0:
                    print('Chunk {}, #Lines Processed: {}, Filtered: {}, Remain: {}'
                          .format(i + 1, total_line_count,
                                  total_line_count - filtered_line_count,
                                  filtered_line_count))
    end = time.time()
    print('Done, #Lines {}/{}, Time spent {}'.format(
        filtered_line_count, total_line_count, end - start))
    return filtered_line_count
Preprocess the mono corpus Parameters ---------- corpus_paths Corpus paths out_path Write the results to the output path chunk_size Approximately split the corpus files into multiple chunks num_process The number of processes Returns ------- line_count The number of lines in the final filtered file
process_mono_corpus
python
dmlc/gluon-nlp
scripts/processing/clean_tok_corpus.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py
Apache-2.0
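A minimal standalone sketch of the chunk-budgeting logic in `chunk_iterator` above, with a made-up `line_pos` array standing in for `get_line_byte_start` (which, as used above, returns the byte offset of each line start plus the end of file):

import numpy as np

line_pos = np.array([0, 40, 95, 200, 260, 330])  # hypothetical byte offsets of line starts + EOF
line_size = line_pos[1:] - line_pos[:-1]         # per-line sizes in bytes
chunk_size, step = 150, 2
budget, chunk_start, cur_chunk_size = chunk_size, 0, 0
num_lines = line_size.shape[0]
for i in range(0, num_lines, step):
    batch_line_size = line_size[i:i + min(num_lines - i, step)].sum()
    budget -= batch_line_size
    cur_chunk_size += batch_line_size
    if budget <= 0 or i + step >= num_lines:
        print('chunk at byte offset', chunk_start, 'with size', cur_chunk_size)
        chunk_start += cur_chunk_size
        cur_chunk_size, budget = 0, chunk_size
# -> two chunks of roughly chunk_size bytes: (0, 260) and (260, 70)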
def calc_approx_error(expected_tensor: np.ndarray, observed_tensor: np.ndarray) -> float:
    '''Calculating relative error for one tensor'''
    error = observed_tensor - expected_tensor
    absolute_error = np.abs(error)
    mean_absolute_error = absolute_error.mean()
    mean_expected_value = np.abs(expected_tensor).mean()
    error = mean_absolute_error / mean_expected_value
    return error
Calculating relative error for one tensor
calc_approx_error
python
dmlc/gluon-nlp
scripts/question_answering/custom_strategy.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py
Apache-2.0
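A quick usage sketch of calc_approx_error with made-up tensors; the result is the mean absolute error normalized by the mean magnitude of the expected tensor:

import numpy as np

expected = np.array([1.0, 2.0, 4.0])
observed = np.array([1.1, 1.9, 4.2])
# mean(|observed - expected|) / mean(|expected|) = (0.4 / 3) / (7 / 3) ≈ 0.0571
print(calc_approx_error(expected, observed))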
def get_approx_errors(expected_tensors, observed_tensors):
    '''Calculating relative error for multiple tensors:
    Dict[tensors_name: str, tensor: np.ndarray]'''
    errors = {}
    for node_name in observed_tensors.keys():
        expected_tensor = expected_tensors[node_name][node_name]
        observed_tensor = observed_tensors[node_name][node_name]
        errors[node_name] = calc_approx_error(expected_tensor, observed_tensor)
    return errors
Calculating relative error for multiple tensors: Dict[tensors_name: str, tensor: np.ndarray]
get_approx_errors
python
dmlc/gluon-nlp
scripts/question_answering/custom_strategy.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py
Apache-2.0
def get_qtensors(self, quant_cfg, node_list):
    '''Generating quantized model based on configuration and capturing intermediate tensors'''
    qmodel = self.adaptor.quantize(quant_cfg, self.model, self.calib_dataloader)
    tensors = self.adaptor.inspect_tensor(qmodel, self.calib_dataloader,
                                          node_list, [1])  # 1 is a batch index
    # We need to specify that we want activation (layer output) because INC
    # stores also weight tensors; 0 is the first batch.
    return tensors['activation'][0]
Generating quantized model based on configuration and capturing intermediate tensors
get_qtensors
python
dmlc/gluon-nlp
scripts/question_answering/custom_strategy.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py
Apache-2.0
def bayesian_params_to_tune_configs(self, params):
    '''Creating configuration from params - mapping configuration indexes to real configurations'''
    node_cfgs = {}
    for node_key, configs in self.opwise_quant_cfgs.items():
        if node_key in params:
            value = int(params[node_key])
            value = min(value, len(configs) - 1)
            node_cfgs[node_key] = copy.deepcopy(configs[value])
    return node_cfgs
Creating configuration from params - mapping configuration indexes to real configurations
bayesian_params_to_tune_configs
python
dmlc/gluon-nlp
scripts/question_answering/custom_strategy.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py
Apache-2.0
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
Lower text and remove punctuation, articles and extra whitespace.
normalize_answer
python
dmlc/gluon-nlp
scripts/question_answering/eval_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py
Apache-2.0
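A hedged usage sketch of normalize_answer (it relies on the `re` and `string` imports used above):

import re
import string

print(normalize_answer('The Force, an Answer!'))  # -> 'force answer'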
def compute_f1(a_gold, a_pred):
    """Compute the token-level f1 scores in which the common tokens are considered
    as True Positives. Precision and recall are percentages of the number of
    common tokens in the prediction and ground truth, respectively.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
Compute the token-level f1 scores in which the common tokens are considered as True Positives. Precision and recall are percentages of the number of common tokens in the prediction and ground truth, respectively.
compute_f1
python
dmlc/gluon-nlp
scripts/question_answering/eval_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py
Apache-2.0
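A usage sketch of compute_f1. It assumes `get_tokens` is the usual SQuAD-eval helper, i.e. normalize_answer(s).split(); that helper is not shown in this row, so treat the definition below as an assumption:

import collections

def get_tokens(s):
    # assumed helper, matching the official SQuAD evaluation script
    return normalize_answer(s).split() if s else []

# gold tokens ['cat', 'sat'] vs pred tokens ['cat', 'sat', 'down']:
# precision = 2/3, recall = 1, f1 = 0.8
print(compute_f1('the cat sat', 'a cat sat down'))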
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the best threshold of the raw scores.

    The initial score is set to the number of unanswerable questions,
    assuming that each unanswerable question is successfully predicted.
    In the following traversal, the best threshold is constantly adjusted
    according to the difference from the assumption ('diff').
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    # Rearrange the na_probs in ascending order, so that the questions
    # with a higher probability of answerability are read first.
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            # For the answerable question
            diff = scores[qid]
        else:
            # For the unanswerable question
            if preds[qid]:
                # Falsely predict the answerability
                diff = -1
            else:
                # Correctly predict the answerability. This is only true if the
                # prediction is blank, which is not the case before revision
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            # adjust the best thresh over current thresh (na_probs[qid])
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
Find the best threshold of the raw scores. The initial score is set to the number of unanswerable questions, assuming that each unanswerable question is successfully predicted. In the following traversal, the best threshold is constantly adjusted according to the difference from the assumption ('diff').
find_best_thresh
python
dmlc/gluon-nlp
scripts/question_answering/eval_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py
Apache-2.0
def revise_unanswerable(preds, na_probs, na_prob_thresh):
    """Revise the prediction results and return a null string for unanswerable
    questions whose unanswerable probability is above the threshold.

    Parameters
    ----------
    preds: dict
        A dictionary of full prediction of spans
    na_probs: dict
        A dictionary of unanswerable probabilities
    na_prob_thresh: float
        threshold of the unanswerable probability

    Returns
    -------
    revised: dict
        A dictionary of revised predictions
    """
    revised = copy.deepcopy(preds)
    for q_id in na_probs.keys():
        if na_probs[q_id] > na_prob_thresh:
            revised[q_id] = ""
    return revised
Revise the prediction results and return a null string for unanswerable questions whose unanswerable probability is above the threshold. Parameters ---------- preds: dict A dictionary of full prediction of spans na_probs: dict A dictionary of unanswerable probabilities na_prob_thresh: float threshold of the unanswerable probability Returns ------- revised: dict A dictionary of revised predictions
revise_unanswerable
python
dmlc/gluon-nlp
scripts/question_answering/eval_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py
Apache-2.0
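A toy usage sketch of revise_unanswerable with made-up question ids:

preds = {'q1': 'blue', 'q2': '42'}
na_probs = {'q1': 0.1, 'q2': 0.9}
print(revise_unanswerable(preds, na_probs, na_prob_thresh=0.5))
# -> {'q1': 'blue', 'q2': ''} : q2 is treated as unanswerable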
def squad_eval(data_file, preds, na_probs, na_prob_thresh=0.0, revise=False):
    """
    Parameters
    ----------
    data_file
        dataset(list) or data_file(str)
    preds
        predictions dictionary
    na_probs
        probabilities dictionary of unanswerable
    na_prob_thresh
        threshold of unanswerable
    revise
        Whether to get the final predictions with impossible answers replaced
        with null string ''

    Returns
    -------
    out_eval
        A dictionary of output results
    (preds_out)
        A dictionary of final predictions
    """
    if isinstance(data_file, str):
        with open(data_file) as f:
            dataset_json = json.load(f)
            dataset = dataset_json['data']
    elif isinstance(data_file, list):
        dataset = data_file
    if na_probs is None:
        na_probs = {k: 0.0 for k in preds}
        # not necessary to revise results of SQuAD 1.1
        revise = False
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
                                          na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
                                       na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)

    if revise:
        thresh = (out_eval['best_exact_thresh'] + out_eval['best_f1_thresh']) * 0.5
        preds_out = revise_unanswerable(preds, na_probs, thresh)
        return out_eval, preds_out
    else:
        return out_eval, preds
Parameters ---------- data_file dataset(list) or data_file(str) preds predictions dictionary na_probs probabilities dictionary of unanswerable na_prob_thresh threshold of unanswerable revise Whether to get the final predictions with impossible answers replaced with null string '' Returns ------- out_eval A dictionary of output results (preds_out) A dictionary of final predictions
squad_eval
python
dmlc/gluon-nlp
scripts/question_answering/eval_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py
Apache-2.0
def forward(self, tokens, token_types, valid_length, p_mask):
    """
    Parameters
    ----------
    tokens
        Shape (batch_size, seq_length)
        The merged input tokens
    token_types
        Shape (batch_size, seq_length)
        Token types for the sequences, used to indicate whether the word belongs to the
        first sentence or the second one.
    valid_length
        Shape (batch_size,)
        Valid length of the sequence. This is used to mask the padded tokens.
    p_mask
        The mask that is associated with the tokens.

    Returns
    -------
    start_logits
        Shape (batch_size, sequence_length)
        The log-softmax scores that the position is the start position.
    end_logits
        Shape (batch_size, sequence_length)
        The log-softmax scores that the position is the end position.
    """
    # Get contextual embedding with the shape (batch_size, sequence_length, C)
    if self.use_segmentation:
        contextual_embeddings = self.backbone(tokens, token_types, valid_length)
    else:
        contextual_embeddings = self.backbone(tokens, valid_length)
    scores = self.qa_outputs(contextual_embeddings)
    start_scores = scores[:, :, 0]
    end_scores = scores[:, :, 1]
    start_logits = masked_logsoftmax(start_scores, mask=p_mask, axis=-1)
    end_logits = masked_logsoftmax(end_scores, mask=p_mask, axis=-1)
    return start_logits, end_logits
Parameters ---------- tokens Shape (batch_size, seq_length) The merged input tokens token_types Shape (batch_size, seq_length) Token types for the sequences, used to indicate whether the word belongs to the first sentence or the second one. valid_length Shape (batch_size,) Valid length of the sequence. This is used to mask the padded tokens. p_mask The mask that is associated with the tokens. Returns ------- start_logits Shape (batch_size, sequence_length) The log-softmax scores that the position is the start position. end_logits Shape (batch_size, sequence_length) The log-softmax scores that the position is the end position.
forward
python
dmlc/gluon-nlp
scripts/question_answering/models.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py
Apache-2.0
def inference(self, tokens, token_types, valid_length, p_mask,
              start_top_n: int = 5, end_top_n: int = 5):
    """Get the inference result with beam search

    Parameters
    ----------
    tokens
        The input tokens. Shape (batch_size, sequence_length)
    token_types
        The input token types. Shape (batch_size, sequence_length)
    valid_length
        The valid length of the tokens. Shape (batch_size,)
    p_mask
        The mask which indicates that some tokens won't be used in the calculation.
        Shape (batch_size, sequence_length)
    start_top_n
        The number of candidates to select for the start position.
    end_top_n
        The number of candidates to select for the end position.

    Returns
    -------
    start_top_logits
        The top start logits. Shape (batch_size, start_top_n)
    start_top_index
        Index of the top start logits. Shape (batch_size, start_top_n)
    end_top_logits
        The top end logits. Shape (batch_size, end_top_n)
    end_top_index
        Index of the top end logits. Shape (batch_size, end_top_n)
    """
    # Shape (batch_size, sequence_length, C)
    if self.use_segmentation:
        contextual_embeddings = self.backbone(tokens, token_types, valid_length)
    else:
        contextual_embeddings = self.backbone(tokens, valid_length)
    scores = self.qa_outputs(contextual_embeddings)
    start_scores = scores[:, :, 0]
    end_scores = scores[:, :, 1]
    start_logits = masked_logsoftmax(start_scores, mask=p_mask, axis=-1)
    end_logits = masked_logsoftmax(end_scores, mask=p_mask, axis=-1)
    # The shape of start_top_index will be (..., start_top_n)
    start_top_logits, start_top_index = mx.npx.topk(start_logits, k=start_top_n,
                                                    axis=-1, ret_typ='both')
    # Note that end_top_index and end_top_log_probs have shape (bsz, start_n_top, end_n_top)
    # So that for each start position, there are end_n_top end positions on the third dim.
    end_top_logits, end_top_index = mx.npx.topk(end_logits, k=end_top_n,
                                                axis=-1, ret_typ='both')
    return start_top_logits, start_top_index, end_top_logits, end_top_index
Get the inference result with beam search Parameters ---------- tokens The input tokens. Shape (batch_size, sequence_length) token_types The input token types. Shape (batch_size, sequence_length) valid_length The valid length of the tokens. Shape (batch_size,) p_mask The mask which indicates that some tokens won't be used in the calculation. Shape (batch_size, sequence_length) start_top_n The number of candidates to select for the start position. end_top_n The number of candidates to select for the end position. Returns ------- start_top_logits The top start logits Shape (batch_size, start_top_n) start_top_index Index of the top start logits Shape (batch_size, start_top_n) end_top_logits The top end logits. Shape (batch_size, end_top_n) end_top_index Index of the top end logits Shape (batch_size, end_top_n)
inference
python
dmlc/gluon-nlp
scripts/question_answering/models.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py
Apache-2.0
def get_squad_features(args, tokenizer, segment):
    """Get processed data features of SquadExamples

    Parameters
    ----------
    args : argparse.Namespace
    tokenizer:
        Tokenizer instance
    segment: str
        train or dev

    Returns
    -------
    data_features
        The list of processed data features
    """
    data_cache_path = os.path.join(CACHE_PATH,
                                   '{}_{}_squad_{}.ndjson'.format(segment, args.model_name,
                                                                  args.version))
    is_training = (segment == 'train')
    if os.path.exists(data_cache_path) and not args.overwrite_cache:
        data_features = []
        with open(data_cache_path, 'r') as f:
            for line in f:
                data_features.append(SquadFeature.from_json(line))
        logging.info('Found cached data features, load from {}'.format(data_cache_path))
    else:
        data_examples = get_squad_examples(args.data_dir, segment=segment,
                                           version=args.version)
        start = time.time()
        num_process = min(cpu_count(), 8)
        logging.info('Tokenize Data:')
        with Pool(num_process) as pool:
            data_features = pool.map(functools.partial(convert_squad_example_to_feature,
                                                       tokenizer=tokenizer,
                                                       is_training=is_training),
                                     data_examples)
        logging.info('Done! Time spent:{:.2f} seconds'.format(time.time() - start))
        with open(data_cache_path, 'w', encoding='utf-8') as f:
            for feature in data_features:
                f.write(feature.to_json() + '\n')
    return data_features
Get processed data features of SquadExamples Parameters ---------- args : argparse.Namespace tokenizer: Tokenizer instance segment: str train or dev Returns ------- data_features The list of processed data features
get_squad_features
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def get_network(model_name, ctx_l,
                dropout=0.1,
                checkpoint_path=None,
                backbone_path=None,
                dtype='float32'):
    """Get the network that fine-tunes the Question Answering Task

    Parameters
    ----------
    model_name : str
        The model name of the backbone model
    ctx_l :
        Context list of training device like [mx.gpu(0), mx.gpu(1)]
    dropout : float
        Dropout probability of the task specified layer
    checkpoint_path: str
        Path to a fine-tuned checkpoint
    backbone_path: str
        Path to the backbone model to be loaded in qa_net

    Returns
    -------
    cfg
    tokenizer
    qa_net
    use_segmentation
    """
    # Create the network
    use_segmentation = 'roberta' not in model_name and 'xlmr' not in model_name
    Model, cfg, tokenizer, download_params_path, _ = \
        get_backbone(model_name, load_backbone=not backbone_path)
    backbone = Model.from_cfg(cfg, use_pooler=False, dtype=dtype)
    # Load local backbone parameters if backbone_path provided.
    # Otherwise, download backbone parameters from gluon zoo.
    backbone_params_path = backbone_path if backbone_path else download_params_path
    if checkpoint_path is None:
        backbone.load_parameters(backbone_params_path, ignore_extra=True,
                                 ctx=ctx_l, cast_dtype=True)
        num_params, num_fixed_params \
            = count_parameters(deduplicate_param_dict(backbone.collect_params()))
        logging.info(
            'Loading Backbone Model from {}, with total/fixed parameters={}/{}'.format(
                backbone_params_path, num_params, num_fixed_params))
    qa_net = ModelForQAConditionalV1(backbone=backbone,
                                     dropout_prob=dropout,
                                     use_segmentation=use_segmentation,
                                     weight_initializer=TruncNorm(stdev=0.02))
    if checkpoint_path is None:
        # Ignore the UserWarning during initialization,
        # There is no need to re-initialize the parameters of backbone
        qa_net.initialize(ctx=ctx_l)
    else:
        qa_net.load_parameters(checkpoint_path, ctx=ctx_l, cast_dtype=True)
    qa_net.hybridize()

    return cfg, tokenizer, qa_net, use_segmentation
Get the network that fine-tunes the Question Answering Task Parameters ---------- model_name : str The model name of the backbone model ctx_l : Context list of training device like [mx.gpu(0), mx.gpu(1)] dropout : float Dropout probability of the task specified layer checkpoint_path: str Path to a fine-tuned checkpoint backbone_path: str Path to the backbone model to be loaded in qa_net Returns ------- cfg tokenizer qa_net use_segmentation
get_network
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def setup_logging(args, local_rank):
    """Setup logging configuration as well as random seed"""
    logging_config(args.output_dir,
                   name='finetune_squad{}'.format(args.version),  # avoid race
                   overwrite_handler=True,
                   console=(local_rank == 0))
    logging.info(args)
    set_seed(args.seed)
    logging.debug('Random seed set to {}'.format(args.seed))
Setup logging configuration as well as random seed
setup_logging
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def predict_extended(original_feature,
                     chunked_features,
                     results,
                     n_best_size,
                     max_answer_length=64,
                     start_top_n=5,
                     end_top_n=5):
    """Get prediction results for SQuAD.

    Start Logits: (B, N_start)
    End Logits: (B, N_start, N_end)

    Parameters
    ----------
    original_feature:
        The original SquadFeature before chunking
    chunked_features
        List of ChunkFeatures
    results
        List of model predictions for span start and span end.
    n_best_size
        Best N results written to file
    max_answer_length
        Maximum length of the answer tokens.
    start_top_n
        Number of start-position candidates
    end_top_n
        Number of end-position candidates

    Returns
    -------
    not_answerable_score
        Model's estimate that the question is not answerable.
    prediction
        The final prediction.
    nbest_json
        n-best predictions with their probabilities.
    """
    # Score for not-answerable. We set it to be a large and positive value.
    # If one chunk votes for answerable, we will treat the context as answerable,
    # Thus, the overall not_answerable_score = min(chunk_not_answerable_score)
    not_answerable_score = 1000000
    all_start_idx = []
    all_end_idx = []
    all_pred_score = []
    context_length = len(original_feature.context_token_ids)
    token_max_context_score = np.full((len(chunked_features), context_length),
                                      -np.inf,
                                      dtype=np.float32)
    for i, chunked_feature in enumerate(chunked_features):
        chunk_start = chunked_feature.chunk_start
        chunk_length = chunked_feature.chunk_length
        for j in range(chunk_start, chunk_start + chunk_length):
            # This is a heuristic score
            # TODO investigate the impact
            token_max_context_score[i, j] = min(j - chunk_start,
                                                chunk_start + chunk_length - 1 - j) \
                + 0.01 * chunk_length
    token_max_chunk_id = token_max_context_score.argmax(axis=0)

    for chunk_id, (result, chunk_feature) in enumerate(zip(results, chunked_features)):
        # We use the log-likelihood as the not answerable score.
        # Thus, a high score indicates that the answer is not answerable
        cur_not_answerable_score = float(result.answerable_logits[1])
        not_answerable_score = min(not_answerable_score, cur_not_answerable_score)
        # Calculate the start_logits + end_logits as the overall score
        context_offset = chunk_feature.context_offset
        chunk_start = chunk_feature.chunk_start
        chunk_length = chunk_feature.chunk_length
        for i in range(start_top_n):
            for j in range(end_top_n):
                pred_score = result.start_top_logits[i] + result.end_top_logits[i, j]
                start_index = result.start_top_index[i]
                end_index = result.end_top_index[i, j]
                # We could hypothetically create invalid predictions, e.g., predict
                # that the start of the answer span is in the query tokens or out of
                # the chunk. We throw out all invalid predictions.
                if not (context_offset <= start_index < context_offset + chunk_length) or \
                        not (context_offset <= end_index < context_offset + chunk_length) or \
                        end_index < start_index:
                    continue
                pred_answer_length = end_index - start_index + 1
                if pred_answer_length > max_answer_length:
                    continue
                start_idx = int(start_index - context_offset + chunk_start)
                end_idx = int(end_index - context_offset + chunk_start)
                if token_max_chunk_id[start_idx] != chunk_id:
                    continue
                all_start_idx.append(start_idx)
                all_end_idx.append(end_idx)
                all_pred_score.append(pred_score)
    sorted_start_end_score = sorted(zip(all_start_idx, all_end_idx, all_pred_score),
                                    key=lambda args: args[-1], reverse=True)
    nbest = []
    context_text = original_feature.context_text
    context_token_offsets = original_feature.context_token_offsets
    seen_predictions = set()
    for start_idx, end_idx, pred_score in sorted_start_end_score:
        if len(seen_predictions) >= n_best_size:
            break
        pred_answer = context_text[context_token_offsets[start_idx][0]:
                                   context_token_offsets[end_idx][1]]
        seen_predictions.add(pred_answer)
        nbest.append((pred_answer, pred_score))
    # In very rare edge cases we could have no valid predictions. So we
    # just create a nonce prediction in this case to avoid failure.
    if len(nbest) == 0:
        nbest.append(('', float('-inf')))
    all_scores = np.array([ele[1] for ele in nbest], dtype=np.float32)
    probs = np.exp(all_scores) / np.sum(np.exp(all_scores))
    nbest_json = []
    for i, (entry, prob) in enumerate(zip(nbest, probs)):
        output = collections.OrderedDict()
        output['text'] = entry[0]
        output['probability'] = float(prob)
        nbest_json.append(output)
    assert len(nbest_json) >= 1
    return not_answerable_score, nbest[0][0], nbest_json
Get prediction results for SQuAD. Start Logits: (B, N_start) End Logits: (B, N_start, N_end) Parameters ---------- original_feature: The original SquadFeature before chunking chunked_features List of ChunkFeatures results List of model predictions for span start and span end. n_best_size Best N results written to file max_answer_length Maximum length of the answer tokens. start_top_n Number of start-position candidates end_top_n Number of end-position candidates Returns ------- not_answerable_score Model's estimate that the question is not answerable. prediction The final prediction. nbest_json n-best predictions with their probabilities.
predict_extended
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def collect(self, name, op_name, arr):
    """Callback function for collecting min and max values from an NDArray."""
    if name not in self.include_layers:
        return
    arr = arr.copyto(mx.cpu()).asnumpy()
    min_range = np.min(arr)
    max_range = np.max(arr)
    if (name.find("sg_onednn_fully_connected_eltwise") != -1
            or op_name.find("LayerNorm") != -1) \
            and max_range > self.clip_max:
        max_range = self.clip_max
    elif name.find('sg_onednn_fully_connected') != -1 and min_range < self.clip_min:
        min_range = self.clip_min
    if name in self.min_max_dict:
        cur_min_max = self.min_max_dict[name]
        self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                   max(cur_min_max[1], max_range))
    else:
        self.min_max_dict[name] = (min_range, max_range)
Callback function for collecting min and max values from an NDArray.
collect
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def eval_validation(ckpt_name, best_eval):
    """Model inference during validation or final evaluation."""
    dev_dataloader = mx.gluon.data.DataLoader(
        dev_all_chunk_features,
        batchify_fn=dataset_processor.BatchifyFunction,
        batch_size=args.eval_batch_size,
        num_workers=0,
        shuffle=False)
    if args.dtype == 'int8':
        quantize_and_calibrate(qa_net, dev_dataloader)

    log_interval = args.eval_log_interval
    all_results = []
    epoch_tic = time.time()
    tic = time.time()
    epoch_size = len(dev_features)
    total_num = 0
    log_num = 0
    for batch_idx, dev_batch in enumerate(grouper(dev_dataloader, len(ctx_l))):
        # Predict for each chunk
        for sample, ctx in zip(dev_batch, ctx_l):
            if sample is None:
                continue
            # Copy the data to device
            tokens = sample.data.as_in_ctx(ctx)
            total_num += len(tokens)
            log_num += len(tokens)
            segment_ids = sample.segment_ids.as_in_ctx(ctx) if use_segmentation else None
            valid_length = sample.valid_length.as_in_ctx(ctx)
            p_mask = sample.masks.as_in_ctx(ctx)
            p_mask = 1 - p_mask  # In the network, we use 1 --> no_mask, 0 --> mask
            start_top_logits, start_top_index, end_top_logits, end_top_index, \
                answerable_logits = qa_net.inference(tokens, segment_ids,
                                                     valid_length, p_mask,
                                                     args.start_top_n, args.end_top_n)
            for i, qas_id in enumerate(sample.qas_id):
                result = RawResultExtended(qas_id=qas_id,
                                           start_top_logits=start_top_logits[i].asnumpy(),
                                           start_top_index=start_top_index[i].asnumpy(),
                                           end_top_logits=end_top_logits[i].asnumpy(),
                                           end_top_index=end_top_index[i].asnumpy(),
                                           answerable_logits=answerable_logits[i].asnumpy())
                all_results.append(result)
        # logging
        if (batch_idx + 1) % log_interval == 0:
            # Output the loss of per step
            toc = time.time()
            logging.info(
                '[batch {}], Time cost={:.2f},'
                ' Throughput={:.2f} samples/s, ETA={:.2f}h'.format(
                    batch_idx + 1, toc - tic, log_num / (toc - tic),
                    (epoch_size - total_num) / (total_num / (toc - epoch_tic)) / 3600))
            tic = time.time()
            log_num = 0
    epoch_toc = time.time()
    logging.info('Time cost=%.2f s, Throughput=%.2f samples/s',
                 epoch_toc - epoch_tic, total_num / (epoch_toc - epoch_tic))

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    no_answer_score_json = collections.OrderedDict()
    for index, (left_index, right_index) in enumerate(zip(dev_chunk_feature_ptr[:-1],
                                                          dev_chunk_feature_ptr[1:])):
        chunked_features = dev_all_chunk_features[left_index:right_index]
        results = all_results[left_index:right_index]
        original_feature = dev_features[index]
        qas_ids = set([result.qas_id for result in results]
                      + [feature.qas_id for feature in chunked_features])
        assert len(qas_ids) == 1, 'Mismatch occurred between features and results'
        example_qas_id = list(qas_ids)[0]
        assert example_qas_id == original_feature.qas_id, \
            'Mismatch occurred between original feature and chunked features'
        not_answerable_score, best_pred, nbest_json = predict_extended(
            original_feature=original_feature,
            chunked_features=chunked_features,
            results=results,
            n_best_size=args.n_best_size,
            max_answer_length=args.max_answer_length,
            start_top_n=args.start_top_n,
            end_top_n=args.end_top_n)
        no_answer_score_json[example_qas_id] = not_answerable_score
        all_predictions[example_qas_id] = best_pred
        all_nbest_json[example_qas_id] = nbest_json

    if args.version == '2.0':
        exact = 'best_exact'
        f1 = 'best_f1'
        na_prob = no_answer_score_json
    else:
        exact = 'exact'
        f1 = 'f1'
        na_prob = None

    cur_eval, revised_predictions = squad_eval(
        dev_data_path, all_predictions, na_prob, revise=na_prob is not None)
    logging.info('The evaluated results are {}'.format(json.dumps(cur_eval)))

    cur_metrics = 0.5 * (cur_eval[exact] + cur_eval[f1])
    if best_eval:
        best_metrics = 0.5 * (best_eval[exact] + best_eval[f1])
    else:
        best_metrics = 0.

    if cur_metrics > best_metrics:
        logging.info('The evaluated files are saved in {}'.format(args.output_dir))
        output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
        output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
        na_prob_file = os.path.join(args.output_dir, 'na_prob.json')
        revised_prediction_file = os.path.join(args.output_dir, 'revised_predictions.json')

        with open(output_prediction_file, 'w') as of:
            of.write(json.dumps(all_predictions, indent=4) + '\n')
        with open(output_nbest_file, 'w') as of:
            of.write(json.dumps(all_nbest_json, indent=4) + '\n')
        with open(na_prob_file, 'w') as of:
            of.write(json.dumps(no_answer_score_json, indent=4) + '\n')
        with open(revised_prediction_file, 'w') as of:
            of.write(json.dumps(revised_predictions, indent=4) + '\n')

        best_eval = cur_eval
        best_eval.update({'best_ckpt': ckpt_name})
    return best_eval
Model inference during validation or final evaluation.
eval_validation
python
dmlc/gluon-nlp
scripts/question_answering/run_squad.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py
Apache-2.0
def get_squad_examples(data_dir, segment='train', version='1.1'):
    """
    Parameters
    ----------
    data_dir
        The directory of the data
    segment
        The segment
    version
        Version of the SQuAD

    Returns
    -------
    examples
        A list of SquadExamples objects
    """
    if version == '1.1':
        train_path = os.path.join(data_dir, 'train-v1.1.json')
        dev_path = os.path.join(data_dir, 'dev-v1.1.json')
    elif version == '2.0':
        train_path = os.path.join(data_dir, 'train-v2.0.json')
        dev_path = os.path.join(data_dir, 'dev-v2.0.json')
    else:
        raise NotImplementedError

    if segment == 'train':
        examples = get_squad_examples_from_json(train_path, is_training=True)
    elif segment == 'dev':
        examples = get_squad_examples_from_json(dev_path, is_training=False)
    else:
        raise NotImplementedError

    return examples
Parameters ---------- data_dir The directory of the data segment The segment version Version of the SQuAD Returns ------- examples A list of SquadExamples objects
get_squad_examples
python
dmlc/gluon-nlp
scripts/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py
Apache-2.0
def gen_self_attn_mask(data,
                       valid_length=None,
                       dtype: type = np.float32,
                       attn_type: str = 'full',
                       layout: str = 'NT'):
    """Generate the mask used for the encoder, i.e., self-attention.

    In our implementation, 1 --> not masked, 0 --> masked

    Let's consider the data with two samples:

    .. code-block:: none

        data = [['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP'  ],
                ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
        valid_length = [8, 6]

    - attn_type = 'causal'
        Each token will attend to itself + the tokens before.
        It will not attend to tokens in the future.

        For our example, the mask of the first sample is

        .. code-block:: none

                       ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
            'I':         1,   0,     0,     0,     0,       0,    0,         0
            'can':       1,   1,     0,     0,     0,       0,    0,         0
            'now':       1,   1,     1,     0,     0,       0,    0,         0
            'use':       1,   1,     1,     1,     0,       0,    0,         0
            'numpy':     1,   1,     1,     1,     1,       0,    0,         0
            'in':        1,   1,     1,     1,     1,       1,    0,         0
            'Gluon@@':   1,   1,     1,     1,     1,       1,    1,         0
            'NLP':       1,   1,     1,     1,     1,       1,    1,         1

        The mask of the second sample is

        .. code-block:: none

                       ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
            'May':       1,     0,     0,       0,    0,      0,     0,       0
            'the':       1,     1,     0,       0,    0,      0,     0,       0
            'force':     1,     1,     1,       0,    0,      0,     0,       0
            'be':        1,     1,     1,       1,    0,      0,     0,       0
            'with':      1,     1,     1,       1,    1,      0,     0,       0
            'you':       1,     1,     1,       1,    1,      1,     0,       0
            '<PAD>':     0,     0,     0,       0,    0,      0,     0,       0
            '<PAD>':     0,     0,     0,       0,    0,      0,     0,       0

    - attn_type = 'full'
        Each token will attend to both the tokens before and in the future

        For our example, the mask of the first sample is

        .. code-block:: none

                       ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
            'I':         1,   1,     1,     1,     1,       1,    1,         1
            'can':       1,   1,     1,     1,     1,       1,    1,         1
            'now':       1,   1,     1,     1,     1,       1,    1,         1
            'use':       1,   1,     1,     1,     1,       1,    1,         1
            'numpy':     1,   1,     1,     1,     1,       1,    1,         1
            'in':        1,   1,     1,     1,     1,       1,    1,         1
            'Gluon@@':   1,   1,     1,     1,     1,       1,    1,         1
            'NLP':       1,   1,     1,     1,     1,       1,    1,         1

        The mask of the second sample is

        .. code-block:: none

                       ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
            'May':       1,     1,     1,       1,    1,      1,     0,       0
            'the':       1,     1,     1,       1,    1,      1,     0,       0
            'force':     1,     1,     1,       1,    1,      1,     0,       0
            'be':        1,     1,     1,       1,    1,      1,     0,       0
            'with':      1,     1,     1,       1,    1,      1,     0,       0
            'you':       1,     1,     1,       1,    1,      1,     0,       0
            '<PAD>':     0,     0,     0,       0,    0,      0,     0,       0
            '<PAD>':     0,     0,     0,       0,    0,      0,     0,       0

    Parameters
    ----------
    data
        The data.

        - layout = 'NT'
            Shape (batch_size, seq_length, C)
        - layout = 'TN'
            Shape (seq_length, batch_size, C)

    valid_length
        Shape (batch_size,)
    dtype
        Data type of the mask
    attn_type
        Can be 'full' or 'causal'
    layout
        The layout of the data

    Returns
    -------
    mask
        Shape (batch_size, seq_length, seq_length)
    """
    if layout == 'NT':
        batch_axis, time_axis = 0, 1
    elif layout == 'TN':
        batch_axis, time_axis = 1, 0
    else:
        raise NotImplementedError('Unsupported layout={}'.format(layout))
    if attn_type == 'full':
        if valid_length is not None:
            valid_length = valid_length.astype(dtype)
            steps = npx.arange_like(data, axis=time_axis)  # (seq_length,)
            mask1 = (npx.reshape(steps, (1, 1, -1))
                     < npx.reshape(valid_length, (-2, 1, 1)))
            mask2 = (npx.reshape(steps, (1, -1, 1))
                     < npx.reshape(valid_length, (-2, 1, 1)))
            mask = mask1 * mask2
        else:
            # TODO(sxjscience) optimize
            seq_len_ones = np.ones_like(npx.arange_like(data, axis=time_axis))  # (seq_length,)
            batch_ones = np.ones_like(npx.arange_like(data, axis=batch_axis))  # (batch_size,)
            mask = batch_ones.reshape((-1, 1, 1)) * seq_len_ones.reshape((1, -1, 1)) \
                * seq_len_ones.reshape((1, 1, -1))
    elif attn_type == 'causal':
        steps = npx.arange_like(data, axis=time_axis)
        # mask: (seq_length, seq_length)
        # batch_mask: (batch_size, seq_length)
        mask = (np.expand_dims(steps, axis=0) <= np.expand_dims(steps, axis=1)).astype(dtype)
        if valid_length is not None:
            valid_length = valid_length.astype(dtype)
            batch_mask = (np.expand_dims(steps, axis=0)
                          < np.expand_dims(valid_length, axis=-1)).astype(dtype)
            mask = mask * np.expand_dims(batch_mask, axis=-1)
        else:
            batch_ones = np.ones_like(npx.arange_like(data, axis=batch_axis),
                                      dtype=dtype)  # (batch_size,)
            mask = mask * batch_ones.reshape((-1, 1, 1))
    else:
        raise NotImplementedError
    return mask.astype(np.bool)
Generate the mask used for the encoder, i.e, self-attention. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data with two samples: .. code-block:: none data = [['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ], ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']] valid_length = [8, 6] - attn_type = 'causal' Each token will attend to itself + the tokens before. It will not attend to tokens in the future. For our example, the mask of the first sample is .. code-block:: none ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP'] 'I': 1, 0, 0, 0, 0, 0, 0, 0 'can': 1, 1, 0, 0, 0, 0, 0, 0 'now': 1, 1, 1, 0, 0, 0, 0, 0 'use': 1, 1, 1, 1, 0, 0, 0, 0 'numpy': 1, 1, 1, 1, 1, 0, 0, 0 'in': 1, 1, 1, 1, 1, 1, 0, 0 'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0 'NLP': 1, 1, 1, 1, 1, 1, 1, 1 The mask of the second sample is .. code-block:: none ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>'] 'May': 1, 0, 0, 0, 0, 0, 0, 0 'the': 1, 1, 0, 0, 0, 0, 0, 0 'force': 1, 1, 1, 0, 0, 0, 0, 0 'be': 1, 1, 1, 1, 0, 0, 0, 0 'with': 1, 1, 1, 1, 1, 0, 0, 0 'you': 1, 1, 1, 1, 1, 1, 0, 0 '<PAD>': 0, 0, 0, 0, 0, 0, 0, 0 '<PAD>': 0, 0, 0, 0, 0, 0, 0, 0 - attn_type = 'full' Each token will attend to both the tokens before and in the future For our example, the mask of the first sample is .. code-block:: none ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP'] 'I': 1, 1, 1, 1, 1, 1, 1, 1 'can': 1, 1, 1, 1, 1, 1, 1, 1 'now': 1, 1, 1, 1, 1, 1, 1, 1 'use': 1, 1, 1, 1, 1, 1, 1, 1 'numpy': 1, 1, 1, 1, 1, 1, 1, 1 'in': 1, 1, 1, 1, 1, 1, 1, 1 'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1 'NLP': 1, 1, 1, 1, 1, 1, 1, 1 The mask of the second sample is .. code-block:: none ['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>'] 'May': 1, 1, 1, 1, 1, 1, 0, 0 'the': 1, 1, 1, 1, 1, 1, 0, 0 'force': 1, 1, 1, 1, 1, 1, 0, 0 'be': 1, 1, 1, 1, 1, 1, 0, 0 'with': 1, 1, 1, 1, 1, 1, 0, 0 'you': 1, 1, 1, 1, 1, 1, 0, 0 '<PAD>': 0, 0, 0, 0, 0, 0, 0, 0 '<PAD>': 0, 0, 0, 0, 0, 0, 0, 0 Parameters ---------- data The data. - layout = 'NT' Shape (batch_size, seq_length, C) - layout = 'TN' Shape (seq_length, batch_size, C) valid_length Shape (batch_size,) dtype Data type of the mask attn_type Can be 'full' or 'causal' layout The layout of the data Returns ------- mask Shape (batch_size, seq_length, seq_length)
gen_self_attn_mask
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
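A plain-NumPy sketch of the same mask semantics as gen_self_attn_mask (standalone numpy rather than mxnet's np/npx, so it runs by itself; for valid rows it reproduces the tables in the docstring above):

import numpy as np

def self_attn_mask_np(seq_length, valid_length, causal=False):
    # 1 -> attend, 0 -> masked; result shape (batch_size, seq_length, seq_length)
    steps = np.arange(seq_length)
    valid = steps[None, :] < np.asarray(valid_length)[:, None]   # (B, T)
    mask = valid[:, :, None] & valid[:, None, :]
    if causal:
        mask = mask & (steps[None, :] <= steps[:, None])[None, :, :]
    return mask

print(self_attn_mask_np(4, [4, 2], causal=True).astype(int)[1])
# -> [[1 0 0 0]
#     [1 1 0 0]
#     [0 0 0 0]
#     [0 0 0 0]]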
def gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length=None,
                      dtype=np.float32, layout: str = 'NT'):
    """Generate the mask used for the decoder. All query slots attend to the memory slots.

    In our implementation, 1 --> not masked, 0 --> masked

    Let's consider the data + mem with a batch of two samples:

    .. code-block:: none

        mem = [['I', 'can', 'now', 'use'],
               ['May', 'the', 'force', '<PAD>']]
        mem_valid_length = [4, 3]
        data = [['numpy', 'in', 'Gluon@@', 'NLP'  ],
                ['be', 'with', 'you', '<PAD>']]
        data_valid_length = [4, 3]

    For our example, the mask of the first sample is

    .. code-block:: none

                   ['I', 'can', 'now', 'use']
        'numpy':     1,   1,     1,     1
        'in':        1,   1,     1,     1
        'Gluon@@':   1,   1,     1,     1
        'NLP':       1,   1,     1,     1

    The mask of the second sample is

    .. code-block:: none

                   ['May', 'the', 'force', '<PAD>']
        'be':        1,     1,     1,       0
        'with':      1,     1,     1,       0
        'you':       1,     1,     1,       0
        '<PAD>':     0,     0,     0,       0

    Parameters
    ----------
    mem
        - layout = 'NT'
            Shape (batch_size, mem_length, C_mem)
        - layout = 'TN'
            Shape (mem_length, batch_size, C_mem)
    mem_valid_length :
        Shape (batch_size,)
    data
        - layout = 'NT'
            Shape (batch_size, query_length, C_data)
        - layout = 'TN'
            Shape (query_length, batch_size, C_data)
    data_valid_length :
        Shape (batch_size,)
    dtype
        Data type of the mask
    layout
        Layout of the data + mem tensor

    Returns
    -------
    mask :
        Shape (batch_size, query_length, mem_length)
    """
    if layout == 'NT':
        batch_axis, time_axis = 0, 1
    elif layout == 'TN':
        batch_axis, time_axis = 1, 0
    else:
        raise NotImplementedError('Unsupported layout={}'.format(layout))
    mem_valid_length = mem_valid_length.astype(dtype)
    mem_steps = npx.arange_like(mem, axis=time_axis)  # (mem_length,)
    data_steps = npx.arange_like(data, axis=time_axis)  # (query_length,)
    # mem_mask: (B, 1, mem_length)
    mem_mask = (npx.reshape(mem_steps, (1, 1, -1))
                < npx.reshape(mem_valid_length, (-2, 1, 1))).astype(dtype)
    if data_valid_length is not None:
        data_valid_length = data_valid_length.astype(dtype)
        # data_mask: (B, query_length, 1)
        data_mask = (npx.reshape(data_steps, (1, -1, 1))
                     < npx.reshape(data_valid_length, (-2, 1, 1))).astype(dtype)
        mask = mem_mask * data_mask
    else:
        query_length_ones = np.ones_like(data_steps)
        mask = query_length_ones.reshape((1, -1, 1)) * mem_mask
    return mask.astype(np.bool)
Generate the mask used for the decoder. All query slots are attended to the memory slots. In our implementation, 1 --> not masked, 0 --> masked Let's consider the data + mem with a batch of two samples: .. code-block:: none mem = [['I', 'can', 'now', 'use'], ['May', 'the', 'force', '<PAD>']] mem_valid_length = [4, 3] data = [['numpy', 'in', 'Gluon@@', 'NLP' ], ['be', 'with', 'you', '<PAD>']] data_valid_length = [4, 3] For our example, the mask of the first sample is .. code-block:: none ['I', 'can', 'now', 'use'] 'numpy': 1, 1, 1, 1 'in': 1, 1, 1, 1 'Gluon@@': 1, 1, 1, 1 'NLP': 1, 1, 1, 1 The mask of the second sample is .. code-block:: none ['be', 'with', 'you', '<PAD>'] 'May': 1, 1, 1, 0 'the': 1, 1, 1, 0 'force': 1, 1, 1, 0 '<PAD>': 0, 0, 0, 0 Parameters ---------- mem - layout = 'NT' Shape (batch_size, mem_length, C_mem) - layout = 'TN' Shape (mem_length, batch_size, C_mem) mem_valid_length : Shape (batch_size,) data - layout = 'NT' Shape (batch_size, query_length, C_data) - layout = 'TN' Shape (query_length, batch_size, C_data) data_valid_length : Shape (batch_size,) dtype Data type of the mask layout Layout of the data + mem tensor Returns ------- mask : Shape (batch_size, query_length, mem_length)
gen_mem_attn_mask
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
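The matching plain-NumPy sketch for gen_mem_attn_mask; with mem_valid_length=[4, 3] and data_valid_length=[4, 3] it reproduces the docstring example:

import numpy as np

def mem_attn_mask_np(mem_valid_length, data_valid_length, query_length, mem_length):
    # 1 -> attend, 0 -> masked; result shape (batch_size, query_length, mem_length)
    mem_mask = np.arange(mem_length)[None, None, :] < np.asarray(mem_valid_length)[:, None, None]
    data_mask = np.arange(query_length)[None, :, None] < np.asarray(data_valid_length)[:, None, None]
    return mem_mask & data_mask

print(mem_attn_mask_np([4, 3], [4, 3], 4, 4).astype(int)[1])
# -> [[1 1 1 0]
#     [1 1 1 0]
#     [1 1 1 0]
#     [0 0 0 0]]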
def masked_softmax(att_score, mask, axis: int = -1, temperature=None):
    """Ignore the masked elements when calculating the softmax.
    The mask can be broadcastable.

    Parameters
    ----------
    att_score : Symbol or NDArray
        Shape (..., length, ...)
    mask : Symbol or NDArray or None
        Shape (..., length, ...)
        1 --> The element is not masked
        0 --> The element is masked
    axis
        The axis to calculate the softmax. att_score.shape[axis] must be the same
        as mask.shape[axis]
    temperature
        The temperature. It scales down the scores before applying the softmax.

    Returns
    -------
    att_weights : Symbol or NDArray
        Shape (..., length, ...)
    """
    if mask is None:
        return npx.softmax(att_score, axis=axis, temperature=temperature)
    else:
        return npx.masked_softmax(att_score, mask=mask.astype(np.bool),
                                  axis=axis, temperature=temperature)
Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ...) 1 --> The element is not masked 0 --> The element is masked axis The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] temperature The temperature. It scales down the scores before applying the softmax. Returns ------- att_weights : Symborl or NDArray Shape (..., length, ...)
masked_softmax
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
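For intuition, a plain-NumPy reference with the same semantics as masked_softmax (assumes at least one unmasked element along the softmax axis):

import numpy as np

def masked_softmax_np(att_score, mask, axis=-1):
    # Masked positions get a score of -inf and therefore zero weight
    scores = np.where(mask.astype(bool), att_score, -np.inf)
    scores = scores - scores.max(axis=axis, keepdims=True)
    weights = np.exp(scores)
    return weights / weights.sum(axis=axis, keepdims=True)

print(masked_softmax_np(np.array([1.0, 2.0, 3.0]), np.array([1, 1, 0])))
# -> [0.269 0.731 0.   ]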
def masked_logsoftmax(att_score, mask, axis: int = -1):
    """Ignore the masked elements when calculating the softmax.
    The mask can be broadcastable.

    Parameters
    ----------
    att_score : Symbol or NDArray
        Shape (..., length, ...)
    mask : Symbol or NDArray or None
        Shape (..., length, ...)
        mask = 1 --> not masked
        mask = 0 --> masked
    axis
        The axis to calculate the softmax. att_score.shape[axis] must be the same
        as mask.shape[axis]

    Returns
    -------
    logits : Symbol or NDArray
        Shape (..., length, ...)
        The masked positions take the value -inf (i.e. zero probability)
    """
    if mask is None:
        return npx.log_softmax(att_score, axis=axis)
    else:
        mask = mask.astype(np.bool)
        return np.where(mask,
                        npx.masked_log_softmax(att_score, mask, axis=axis),
                        -np.inf)
Ignore the masked elements when calculating the softmax. The mask can be broadcastable. Parameters ---------- att_score : Symbol or NDArray Shape (..., length, ...) mask : Symbol or NDArray or None Shape (..., length, ...) mask = 1 --> not masked mask = 0 --> masked axis The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] Returns ------- logits : Symbol or NDArray Shape (..., length, ...) The masked positions take the value -inf (i.e. zero probability)
masked_logsoftmax
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
def multi_head_dot_attn(query, key, value,
                        mask=None,
                        edge_scores=None,
                        dropout: float = 0.0,
                        scaled: bool = True, normalized: bool = False,
                        eps: float = 1E-6,
                        query_head_units: Optional[int] = None,
                        layout: str = 'NKT',
                        use_einsum: bool = False):
    """Multihead dot product attention between the query, key, value.

    scaled is False, normalized is False:
        D(h_q, h_k) = <h_q, h_k>
    scaled is True, normalized is False:
        D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
    scaled is False, normalized is True:
        D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
    scaled is True, normalized is True:
        D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)

    If edge_scores is provided, we will calculate the attention as
        scores = D(h_q, h_k) + EdgeScore_{q, k}

    Parameters
    ----------
    query
        Query. The shape depends on the layout

        - layout is 'NKT'
            Shape (batch_size, num_heads, query_length, key_dim)
        - layout is 'NTK'
            Shape (batch_size, query_length, num_heads, key_dim)
        - layout is 'TNK'
            Shape (query_length, batch_size, num_heads, key_dim)

    key
        Key. The shape depends on the layout

        - layout is 'NKT'
            Shape (batch_size, num_heads, mem_length, key_dim)
        - layout is 'NTK'
            Shape (batch_size, mem_length, num_heads, key_dim)
        - layout is 'TNK'
            Shape (mem_length, batch_size, num_heads, key_dim)

    value
        Value. The shape depends on the layout

        - layout is 'NKT'
            Shape (batch_size, num_heads, mem_length, value_dim)
        - layout is 'NTK'
            Shape (batch_size, mem_length, num_heads, value_dim)
        - layout is 'TNK'
            Shape (mem_length, batch_size, num_heads, value_dim)

    mask
        Mask between query and memory. Shape (batch_size, query_length, mem_length)
    edge_scores
        The edge attention score. Shape can be any shape that is broadcastable to
        (batch_size, num_heads, query_length, mem_length)
    dropout
        Dropout rate
    scaled
        Whether to divide the attention weights by the sqrt of the query dimension.
        This is first proposed in "[NIPS2017] Attention is all you need."::

        .. code-block:: none

            score = <h_q, h_k> / sqrt(dim_q)

    normalized
        If turned on, the cosine distance is used, i.e::

        .. code-block:: none

            score = <h_q / ||h_q||, h_k / ||h_k||>

    eps
        The epsilon value used in L2 normalization
    query_head_units
        The units of each query head. If it's empty, we will estimate it via the
        shape_array of the query.
    layout
        This stands for the layout of the attention cell. The shape of the input/output
        will depend on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in
        which 'N' means the batch_size, 'K' means the head, and 'T' means the length
        dimension.
    use_einsum
        Whether to use einsum for the computation

    Returns
    -------
    context_vec
        - layout is 'NKT' or 'NTK'
            Shape (batch_size, query_length, num_heads * value_units)
        - layout is 'TNK'
            Shape (query_length, batch_size, num_heads * value_units)

    additional_info
        scores:
            Shape (batch_size, num_head, query_length, mem_length)
        attn_weight:
            Shape (batch_size, num_head, query_length, mem_length)
    """
    # TODO(sxjscience) Profile layout
    if normalized:
        query = l2_normalize(query, axis=-1, eps=eps)
        key = l2_normalize(key, axis=-1, eps=eps)
    if scaled:
        if query_head_units is None:
            raise NotImplementedError('You will need to specify query_head_units!')
        else:
            scale = math.sqrt(query_head_units)
    else:
        scale = None
    if layout == 'NKT':
        # 1. Expand the dimension of the mask:
        #   (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
        if mask is not None:
            mask = np.expand_dims(mask, axis=1).astype(np.bool)
        # 2. Calculate the attention weights
        #   Score: (B, N, L_query, C_Q) X (B, N, L_mem, C_Q) --> (B, N, L_query, L_mem)
        scores = npx.batch_dot(query, key, transpose_b=True)
        if edge_scores is not None:
            scores = scores + edge_scores
        attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
        attn_weights = npx.dropout(attn_weights, p=dropout)
        # 3. Calculate the context vector
        #   (B, N, L_query, L_mem) X (B, N, L_mem, C_V) --> (B, L_query, N * C_V)
        if use_einsum:
            context_vec = np.einsum('bnij,bnjc->binc', attn_weights, value)
        else:
            context_vec = npx.batch_dot(attn_weights, value).transpose((0, 2, 1, 3))
        context_vec = npx.reshape(context_vec, (-2, -2, -1))
    elif layout == 'NTK':
        # 1. Expand the dimension of the mask:
        #   (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
        if mask is not None:
            mask = np.expand_dims(mask, axis=1).astype(np.bool)
        # 2. Calculate the attention weights
        #   Score: (B, L_query, N, C_Q) X (B, L_mem, N, C_Q) --> (B, N, L_query, L_mem)
        if use_einsum:
            scores = np.einsum('binc,bjnc->bnij', query, key)
        else:
            scores = npx.batch_dot(np.swapaxes(query, 1, 2), np.swapaxes(key, 1, 2),
                                   transpose_b=True)
        if edge_scores is not None:
            scores = scores + edge_scores
        attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
        attn_weights = npx.dropout(attn_weights, p=dropout)
        # 3. Calculate the context vector
        #   (B, N, L_query, L_mem) X (B, L_mem, N, C_V) --> (B, L_query, N * C_V)
        if use_einsum:
            context_vec = np.einsum('bnij,bjnc->binc', attn_weights, value)
        else:
            context_vec = npx.batch_dot(attn_weights,
                                        np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
        context_vec = npx.reshape(context_vec, (-2, -2, -1))
    elif layout == 'TNK':
        # 1. Expand the dimension of the mask:
        #   (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
        if mask is not None:
            mask = np.expand_dims(mask, axis=1).astype(np.bool)
        # 2. Calculate the attention weights
        #   Score: (L_query, B, N, C_Q) X (L_mem, B, N, C_Q) --> (B, N, L_query, L_mem)
        # This layout structure can be implemented very efficiently because B, N are consecutive
        # to each other. To have a clear picture of what's happening, we may consider the
        # (i, j)th element of the output
        #   out[i, j, :, :] = query[:, i, j, :] X key[:, i, j, :].T, which is just one GEMM call
        # We can thus implement the whole kernel via a single call of batched GEMM with stride.
        if use_einsum:
            scores = np.einsum('ibnc,jbnc->bnij', query, key)
        else:
            scores = npx.batch_dot(query.transpose((1, 2, 0, 3)),
                                   key.transpose((1, 2, 3, 0)))
        if edge_scores is not None:
            scores = scores + edge_scores
        attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
        attn_weights = npx.dropout(attn_weights, p=dropout)
        # 3. Calculate the context vector
        #   (B, N, L_query, L_mem) X (L_mem, B, N, C_V) --> (L_query, B, N * C_V)
        # Again, we can implement it via a single call to batched GEMM with stride.
        # Shape (B, N, L_query, C_V)
        if use_einsum:
            context_vec = np.einsum('bnij,jbnc->ibnc', attn_weights, value)
        else:
            context_vec = npx.batch_dot(attn_weights,
                                        value.transpose((1, 2, 0, 3))).transpose((2, 0, 1, 3))
        context_vec = npx.reshape(context_vec, (-2, -2, -1))
    else:
        raise NotImplementedError('layout="{}" is not supported! '
                                  'We only support layout = "NKT", "NTK", and "TNK".'
                                  .format(layout))
    return context_vec, [scores, attn_weights]
Multihead dot product attention between the query, key, value. scaled is False, normalized is False: D(h_q, h_k) = <h_q, h_k> scaled is True, normalized is False: D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q) scaled is False, normalized is True: D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> scaled is True, normalized is True: D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q) If edge_scores is provided, we will calcualte the attention as scores = D(h_q, h_k) + EdgeScore_{q, k} Parameters ---------- query Query. The shape depends on the layout - layout is 'NKT' Shape (batch_size, num_heads, query_length, key_dim) - layout is 'NTK' Shape (batch_size, query_length, num_heads, key_dim) - layout is 'TNK' Shape (query_length, batch_size, num_heads, key_dim) key Key. The shape depends on the layout - layout is 'NKT' Shape (batch_size, num_heads, mem_length, key_dim) - layout is 'NTK' Shape (batch_size, mem_length, num_heads, key_dim) - layout is 'TNK' Shape (mem_length, batch_size, num_heads, key_dim) value Value. The shape depends on the layout - layout is 'NKT' Shape (batch_size, num_heads, mem_length, value_dim) - layout is 'NTK' Shape (batch_size, mem_length, num_heads, value_dim) - layout is 'TNK' Shape (mem_length, batch_size, num_heads, value_dim) mask Mask between query and memory. Shape (batch_size, query_length, mem_length) edge_scores The edge attention score. Shape can be any shape that is broadcastable to (batch_size, num_heads, query_length, mem_length) dropout Dropout rate scaled Whether to divide the attention weights by the sqrt of the query dimension. This is first proposed in "[NIPS2017] Attention is all you need.":: .. code-block:: none score = <h_q, h_k> / sqrt(dim_q) normalized If turned on, the cosine distance is used, i.e:: .. code-block:: none score = <h_q / ||h_q||, h_k / ||h_k||> eps The epsilon value used in L2 normalization query_head_units The units of each query head. If it's empty, we will estimate it via the shape_array of the query. layout This stands for the layout of the attention cell. The shape of the input/output will depend on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which 'N' means the batch_size, 'K' means the head, and 'T' means the length dimension. use_einsum Whether to use einsum for the computation Returns ------- context_vec - layout is 'NKT' or 'NTK' Shape (batch_size, query_length, num_heads * value_units) - layout is 'TNK' Shape (query_length, batch_size, num_heads * value_units) additional_info scores: Shape (batch_size, num_head, query_length, mem_length) attn_weight: Shape (batch_size, num_head, query_length, mem_length)
multi_head_dot_attn
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
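A plain-NumPy sketch of the 'NKT' branch above — scaled dot-product attention per head — to make the shapes concrete (numpy's einsum stands in for mxnet's np.einsum/npx.batch_dot; mask, dropout, and edge_scores are omitted):

import numpy as np

B, N, L_q, L_m, C = 2, 4, 5, 6, 8   # batch, heads, query len, mem len, head units
query = np.random.randn(B, N, L_q, C)
key = np.random.randn(B, N, L_m, C)
value = np.random.randn(B, N, L_m, C)

scores = np.einsum('bnic,bnjc->bnij', query, key) / np.sqrt(C)  # (B, N, L_q, L_m)
weights = np.exp(scores - scores.max(-1, keepdims=True))
weights = weights / weights.sum(-1, keepdims=True)              # softmax over the mem axis
context = np.einsum('bnij,bnjc->binc', weights, value).reshape(B, L_q, N * C)
print(context.shape)  # (2, 5, 32)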
def gen_rel_position(data, past_data=None, dtype=np.int32, layout='NT'):
    """Create a matrix of relative position for RelAttentionScoreCell.

    The relative position is defined as the index difference: `mem_i` - `query_j`.
    Note, though, that the implementation here makes sense in self-attention's setting,
    but not in cross-attention's. Hence, both `mem_i` and `query_j` are time indices from
    `data` (or, in incremental decoding's case, the concatenated sequence from the current
    stepwise `data` and the previous steps `past_data`).

    Parameters
    ----------
    data
        The data. Under incremental decoding, seq_length = 1.

        - layout = 'NT'
            Shape (batch_size, seq_length, C)
        - layout = 'TN'
            Shape (seq_length, batch_size, C)

    past_data
        This is only used under incremental decoding. Stacked data from previous steps.
    dtype
        Data type of the mask
    layout
        Layout of the data + past_data

    Returns
    -------
    relative_position :
        Shape (query_length, mem_length) where query_length = mem_length = seq_length
    """
    time_axis = 1 if layout == 'NT' else 0
    if past_data is None:
        position = npx.arange_like(data, axis=time_axis)
    else:
        # for incremental decoding only, where past data is of the shape:
        # NT(NTK): (B, L_seq, num_heads, n_kv) -> (B, L_seq, inner_dim)
        # TN(TNK): (L_seq, B, num_heads, n_kv) -> (L_seq, B, inner_dim)
        past_data = npx.reshape(past_data, (-2, -2, -5))
        position = npx.arange_like(
            np.concatenate([past_data, data], axis=time_axis),
            axis=time_axis
        )
    query_position = np.expand_dims(position, axis=-1)
    mem_position = np.expand_dims(position, axis=0)
    relative_position = mem_position - query_position
    return relative_position.astype(np.int32)  # shape (L_seq, L_seq)
Create a matrix of relative position for RelAttentionScoreCell.

The relative position is defined as the index difference: `mem_i` - `query_j`.
Note, though, that the implementation here makes sense in self-attention's setting,
but not in cross-attention's. Hence, both `mem_i` and `query_j` are time indices from
`data` (or, in incremental decoding's case, the concatenated sequence from the current
stepwise `data` and the previous steps `past_data`).

Parameters
----------
data
    The data. Under incremental decoding, seq_length = 1.

    - layout = 'NT'
        Shape (batch_size, seq_length, C)
    - layout = 'TN'
        Shape (seq_length, batch_size, C)
past_data
    This is only used under incremental decoding. Stacked data from previous steps.
dtype
    Data type of the returned relative position matrix
layout
    Layout of the data + past_data

Returns
-------
relative_position :
    Shape (query_length, mem_length) where query_length = mem_length = seq_length
gen_rel_position
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
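For intuition, the matrix produced by gen_rel_position can be reproduced with plain NumPy; the entry at (query_j, mem_i) is mem_i - query_j. A small demo for a sequence of length 4:

import numpy as np

seq_length = 4
position = np.arange(seq_length)
relative_position = position[None, :] - position[:, None]  # mem_i - query_j
print(relative_position)
# [[ 0  1  2  3]
#  [-1  0  1  2]
#  [-2 -1  0  1]
#  [-3 -2 -1  0]]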
def __init__(self, query_units, num_heads, pos_embed_units: Optional[int] = None, max_distance=None, bidirectional=False, num_buckets=None, method='transformer_xl', dropout: float = 0.0, dtype='float32', layout='NTK', use_einsum=False, embed_initializer=None): """ Parameters ---------- query_units num_heads pos_embed_units max_distance bidirectional num_buckets method dropout dtype layout use_einsum """ super().__init__() self._dropout = dropout self._method = method self._query_units = query_units self._num_heads = num_heads self._bidirectional = bidirectional self._num_buckets = num_buckets assert query_units % num_heads == 0, 'The units must be divisible by the number of heads.' self._head_query_units = query_units // num_heads self._max_distance = max_distance self._pos_embed_units = pos_embed_units self._dtype = dtype self._use_einsum = use_einsum self._layout = layout if self._layout not in ['NKT', 'NTK', 'TNK']: raise ValueError('layout="{}" is not supported'.format(self._layout)) if method == 'transformer_xl': if pos_embed_units is None: pos_embed_units = self._num_heads * self._head_query_units self._rel_pos_embed = SinusoidalPositionalEmbedding(units=pos_embed_units, dtype=self._dtype) self._rel_proj = nn.Dense(units=query_units, in_units=pos_embed_units, flatten=False, use_bias=False, dtype=self._dtype) self._dropout_layer = nn.Dropout(dropout) elif method == 'shaw': assert self._max_distance is not None, 'Must set max_distance when method="shaw".' if self._bidirectional: vocab_size = self._max_distance * 2 + 1 else: vocab_size = self._max_distance + 1 self._rel_pos_embed = LearnedPositionalEmbedding( units=self._num_heads * self._head_query_units, max_length=vocab_size, weight_initializer=mx.init.Xavier(rnd_type="gaussian", factor_type="in", magnitude=1), mode='wrap' if self._bidirectional else 'raise', dtype=self._dtype) elif method == 't5': if self._num_buckets is None: self._num_buckets = 32 if self._max_distance is None: self._max_distance = 128 self._rel_pos_embed = BucketPositionalEmbedding( units=num_heads, num_buckets=self._num_buckets, max_distance=self._max_distance, bidirectional=self._bidirectional, embed_initializer=embed_initializer, dtype=self._dtype) else: raise NotImplementedError('method="{}" is currently not supported!'.format(method))
Parameters ---------- query_units num_heads pos_embed_units max_distance bidirectional num_buckets method dropout dtype layout use_einsum
__init__
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
def forward(self, rel_positions, query=None): """Forward function Parameters ---------- rel_positions The relative shifts. Shape (query_length, mem_length). Each element represents the shift between the :math:`i-th` element of query and the :math:`j-th` element of memory. query The query for computing the relative scores. The shape depends on the layout. If we use T5 attention, the query will not be used. Returns ------- rel_scores The relative attention scores Can have shape (batch_size, num_heads, query_length, mem_length) or (num_heads, query_length, mem_length) """ if self._method == 'transformer_xl' or self._method == 'shaw': assert query is not None, 'Must specify query if method={}'.format(self._method) if self._bidirectional: if self._max_distance is not None: rel_positions = np.clip(rel_positions, a_min=-self._max_distance, a_max=self._max_distance) else: if self._max_distance is not None: rel_positions = np.clip(rel_positions, a_min=0, a_max=self._max_distance) # uniq_rel.shape = (#uniq,), rev_index.shape = (L_q, L_m) uniq_rel, rev_index = np.unique(rel_positions, return_inverse=True) uniq_rel_pos_embed = self._rel_pos_embed(uniq_rel) if self._method == 'transformer_xl': uniq_rel_pos_embed = self._rel_proj(self._dropout_layer(uniq_rel_pos_embed)) # Shape (#uniq, K, C_q) uniq_rel_pos_embed = npx.reshape(uniq_rel_pos_embed, (-2, self._num_heads, self._head_query_units)) # Calculate the dot-product between query and the relative positional embeddings. # After the calculation, rel_score.shape = (L_q, #uniq, N, K) if self._layout == 'NKT': # query_for_rel: (N, K, L_q, C_q) if self._use_einsum: rel_score = np.einsum('bnid,jnd->ijbn', query, uniq_rel_pos_embed) else: rel_score = np.transpose( np.matmul(query, np.transpose(uniq_rel_pos_embed, (1, 2, 0))), (2, 3, 0, 1) ) elif self._layout == 'NTK': # query_for_rel: (N, L_q, K, C_q) if self._use_einsum: rel_score = np.einsum('bind,jnd->ijbn', query, uniq_rel_pos_embed) else: rel_score = np.transpose( np.matmul(np.swapaxes(query, 1, 2), np.transpose(uniq_rel_pos_embed, (1, 2, 0))), (2, 3, 0, 1) ) elif self._layout == 'TNK': # query_for_rel: (L_q, N, K, C_q) if self._use_einsum: rel_score = np.einsum('ibnd,jnd->ijbn', query, uniq_rel_pos_embed) else: rel_score = np.transpose( np.matmul(np.transpose(query, (1, 2, 0, 3)), np.transpose(uniq_rel_pos_embed, (1, 2, 0))), (2, 3, 0, 1) ) else: raise NotImplementedError # We use gather_nd to select the elements # TODO(sxjscience) Use advanced indexing once available rev_index = npx.reshape_like(rev_index, rel_positions).astype(np.int32) query_idx = np.expand_dims(npx.arange_like(rel_positions, axis=0).astype(np.int32), axis=-1) + np.zeros_like(rev_index) rel_score = npx.gather_nd(rel_score, np.stack([query_idx, rev_index])) rel_score = np.transpose(rel_score, (2, 3, 0, 1)) elif self._method == 't5': # shape is (K, L_q, L_m) rel_score = self._rel_pos_embed(rel_positions).transpose((2, 0, 1)) else: raise NotImplementedError return rel_score
Forward function Parameters ---------- rel_positions The relative shifts. Shape (query_length, mem_length). Each element represents the shift between the :math:`i-th` element of query and the :math:`j-th` element of memory. query The query for computing the relative scores. The shape depends on the layout. If we use T5 attention, the query will not be used. Returns ------- rel_scores The relative attention scores Can have shape (batch_size, num_heads, query_length, mem_length) or (num_heads, query_length, mem_length)
forward
python
dmlc/gluon-nlp
src/gluonnlp/attention_cell.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py
Apache-2.0
def get_home_dir(): """Get home directory for storing datasets/models/pre-trained word embeddings""" _home_dir = os.environ.get('GLUONNLP_HOME', os.path.join('~', '.gluonnlp')) # expand ~ to actual path _home_dir = os.path.expanduser(_home_dir) return _home_dir
Get home directory for storing datasets/models/pre-trained word embeddings
get_home_dir
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
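Since get_home_dir reads the environment at call time, the cache location can be redirected before any download happens. A small sketch (the path below is hypothetical):

import os

os.environ['GLUONNLP_HOME'] = '/tmp/my_gluonnlp_cache'  # hypothetical override
assert get_home_dir() == '/tmp/my_gluonnlp_cache'
# The dataset and model directories below are derived from this root,
# e.g. '/tmp/my_gluonnlp_cache/datasets' and '/tmp/my_gluonnlp_cache/models'.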
def get_data_home_dir(): """Get home directory for storing the datasets""" home_dir = get_home_dir() return os.path.join(home_dir, 'datasets')
Get home directory for storing the datasets
get_data_home_dir
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
def get_model_zoo_home_dir(): """Get the local directory for storing pretrained models""" home_dir = get_home_dir() return os.path.join(home_dir, 'models')
Get the local directory for storing pretrained models
get_model_zoo_home_dir
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
def get_model_zoo_checksum_dir(): """Get the directory that stores the checksums of the artifacts in the model zoo """ curr_dir = os.path.realpath(os.path.dirname(os.path.realpath(__file__))) check_sum_dir = os.path.join(curr_dir, 'models', 'model_zoo_checksums') return check_sum_dir
Get the directory that stores the checksums of the artifacts in the model zoo
get_model_zoo_checksum_dir
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
def get_repo_url(): """Return the base URL for Gluon dataset and model repository """ default_repo = 's3://gluonnlp-numpy-data' repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo) if repo_url[-1] != '/': repo_url = repo_url + '/' return repo_url
Return the base URL for Gluon dataset and model repository
get_repo_url
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
def get_repo_model_zoo_url(): """Return the base URL for GluonNLP Model Zoo""" repo_url = get_repo_url() model_zoo_url = repo_url + 'models/' return model_zoo_url
Return the base URL for GluonNLP Model Zoo
get_repo_model_zoo_url
python
dmlc/gluon-nlp
src/gluonnlp/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py
Apache-2.0
def get_norm_layer(normalization: str = 'layer_norm',
                   axis: int = -1,
                   epsilon: float = 1e-5,
                   in_channels: int = 0, **kwargs):
    """
    Get the normalization layer based on the type

    Parameters
    ----------
    normalization
        The type of the normalization layer, chosen from
        ['layer_norm', 'no_norm', 'rms_norm', 'identity', 'batch_norm']
    axis
        The axis along which to normalize
    epsilon
        The epsilon of the normalization layer
    in_channels
        Input channel

    Returns
    -------
    norm_layer
        The normalization layer
    """
    if isinstance(normalization, str):
        if normalization == 'layer_norm':
            norm_layer = nn.LayerNorm(axis=axis,
                                      epsilon=epsilon,
                                      in_channels=in_channels,
                                      **kwargs)
        elif normalization == 'no_norm':
            norm_layer = NoNorm(in_channels=in_channels, **kwargs)
        elif normalization == 'rms_norm':
            norm_layer = RMSNorm(in_channels=in_channels, **kwargs)
        elif normalization == 'identity':
            norm_layer = IdentityActivation()
        elif normalization == 'batch_norm':
            norm_layer = nn.BatchNorm(axis=axis,
                                      epsilon=epsilon,
                                      in_channels=in_channels,
                                      **kwargs)
        else:
            raise NotImplementedError('normalization={} is not supported'
                                      .format(normalization))
        return norm_layer
    else:
        raise NotImplementedError('The type of normalization must be str')
Get the normalization layer based on the type

Parameters
----------
normalization
    The type of the normalization layer, chosen from
    ['layer_norm', 'no_norm', 'rms_norm', 'identity', 'batch_norm']
axis
    The axis along which to normalize
epsilon
    The epsilon of the normalization layer
in_channels
    Input channel

Returns
-------
norm_layer
    The normalization layer
get_norm_layer
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
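A minimal usage sketch, assuming an MXNet environment with numpy semantics enabled and get_norm_layer importable from gluonnlp.layers:

import mxnet as mx
from gluonnlp.layers import get_norm_layer
mx.npx.set_np()

norm = get_norm_layer('layer_norm', axis=-1, epsilon=1e-5, in_channels=16)
norm.initialize()
x = mx.np.random.normal(size=(2, 8, 16))
y = norm(x)  # same shape as x, normalized along the last axis
assert y.shape == (2, 8, 16)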
def _fmt_and_check_cutoffs(cutoffs, vocab_size):
    """Parse and get the cutoffs used in adaptive embedding + adaptive softmax

    Parameters
    ----------
    cutoffs
        The cutoff values of the clusters
    vocab_size
        Size of the vocabulary

    Returns
    -------
    cutoffs
        The parsed cutoffs, a sorted list [c0, c1, ..., c_{k-1}]. Callers combine it with
        0 and vocab_size to obtain the cluster boundaries.
        If the original cutoffs is empty or is None, return None
    """
    # Sanity checks
    if cutoffs is None:
        return None
    if isinstance(cutoffs, int):
        cutoffs = [cutoffs]
    else:
        cutoffs = list(cutoffs)
        if len(cutoffs) == 0:
            return None
    if cutoffs != sorted(cutoffs):
        raise ValueError('cutoffs must be a sorted list of cutoff values. '
                         'Got {}, but expected {}'.format(cutoffs, sorted(cutoffs)))
    if len(set(cutoffs)) != len(cutoffs):
        raise ValueError('cutoffs cannot contain duplicates! cutoffs={}'.format(cutoffs))
    if not cutoffs:
        raise ValueError('cutoffs must not be empty. Got {}'.format(cutoffs))
    if cutoffs[0] <= 0:
        raise ValueError('The first cutoff value ({}) must be greater than 0.'
                         .format(cutoffs[0]))
    if cutoffs[-1] >= vocab_size:
        raise ValueError(
            'The last cutoff value ({}) must be smaller than vocab_size ({}).'.format(
                cutoffs[-1], vocab_size))
    return cutoffs
Parse and get the cutoffs used in adaptive embedding + adaptive softmax

Parameters
----------
cutoffs
    The cutoff values of the clusters
vocab_size
    Size of the vocabulary

Returns
-------
cutoffs
    The parsed cutoffs, a sorted list [c0, c1, ..., c_{k-1}]. Callers combine it with
    0 and vocab_size to obtain the cluster boundaries.
    If the original cutoffs is empty or is None, return None
_fmt_and_check_cutoffs
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
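A short illustration of the contract, assuming the function is in scope (note it is private to gluonnlp.layers):

# Valid: a strictly increasing list inside (0, vocab_size)
cutoffs = _fmt_and_check_cutoffs([2000, 10000], vocab_size=50000)
# -> [2000, 10000]; together with 0 and vocab_size this yields the
#    clusters [0, 2000), [2000, 10000), [10000, 50000)

# Invalid inputs raise ValueError, e.g. an out-of-range last cutoff:
# _fmt_and_check_cutoffs([2000, 50000], vocab_size=50000)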
def get_activation(act: Optional[Union[str, HybridBlock]]) -> HybridBlock: """Get the activation based on the string Parameters ---------- act The activation Returns ------- ret The activation layer """ if act is None: return lambda x: x if isinstance(act, str): if act == 'leaky': # TODO(sxjscience) Add regex matching here to parse `leaky(0.1)` return nn.LeakyReLU(0.1) elif act == 'identity': return IdentityActivation() elif act == 'elu': return ELU() elif act == 'gelu': return GELU(mode='erf') elif act == 'gelu(tanh)': return GELU(mode='tanh') elif act == 'gelu(sigmoid)': return GELU(mode='sigmoid') elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']: return nn.Activation(act) else: raise NotImplementedError('act={} is not supported'.format(act)) else: return act
Get the activation based on the string Parameters ---------- act The activation Returns ------- ret The activation layer
get_activation
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
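Usage sketch for get_activation, under the same assumptions as the get_norm_layer example above:

import mxnet as mx
from gluonnlp.layers import get_activation
mx.npx.set_np()

act = get_activation('gelu(tanh)')   # tanh-approximated GELU
x = mx.np.array([-1.0, 0.0, 1.0])
y = act(x)

identity = get_activation(None)      # None maps to a pass-through lambda
assert (identity(x) == x).all()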
def __init__(self, units: int, dtype: Union[str, type] = 'float32'): """Use a geometric sequence of timescales. Parameters ---------- units The number of units for positional embedding dtype The dtype of the inner positional embeddings """ super().__init__() def _init_sinusoidal_base(units): half_units = units // 2 val = np.log(10000) / (half_units - 1) val = np.exp(np.arange(half_units, dtype=np.float32) * -val) return val self._units = units self._dtype = dtype sinusoidal_base = _init_sinusoidal_base(units) self.base_mult = Constant(sinusoidal_base)
Use a geometric sequence of timescales. Parameters ---------- units The number of units for positional embedding dtype The dtype of the inner positional embeddings
__init__
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def forward(self, positions): """ Parameters ---------- positions : NDArray Shape (..., ) Returns ------- ret : Shape (..., units) """ emb = np.expand_dims(positions.astype(self._dtype), axis=-1) * self.base_mult.data() sin_emb = np.sin(emb) cos_emb = np.cos(emb) if self._units % 2 == 0: return np.concatenate([sin_emb, cos_emb], axis=-1) else: return np.concatenate( [sin_emb, cos_emb, np.expand_dims(np.zeros_like(positions).astype(self._dtype), axis=-1)], axis=-1)
Parameters ---------- positions : NDArray Shape (..., ) Returns ------- ret : Shape (..., units)
forward
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
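The same geometric-timescale construction can be written in a few lines of plain NumPy, which makes the shapes explicit (units is assumed even here; odd units would get one extra zero column, as in the forward above):

import numpy as np

units = 6
half = units // 2
inv_freq = np.exp(np.arange(half, dtype=np.float32)
                  * -(np.log(10000) / (half - 1)))
positions = np.arange(4, dtype=np.float32)
emb = positions[:, None] * inv_freq[None, :]
pos_embed = np.concatenate([np.sin(emb), np.cos(emb)], axis=-1)
assert pos_embed.shape == (4, units)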
def __init__(self,
             units: int = 512,
             hidden_size: int = 2048,
             use_bias=True,
             activation_dropout: float = 0.0,
             dropout: float = 0.1,
             weight_initializer=None,
             bias_initializer='zeros',
             activation='relu',
             use_gated_activation=False,
             normalization: str = 'layer_norm',
             layer_norm_eps: float = 1E-5,
             pre_norm: bool = False,
             dtype='float32',
             **kwargs):
    """

    Parameters
    ----------
    units
    hidden_size
    use_bias
    activation_dropout
    dropout
    weight_initializer
    bias_initializer
    activation
    use_gated_activation
    normalization
        The type of normalization; see get_norm_layer, e.g., 'layer_norm' or 'no_norm'
    layer_norm_eps
    pre_norm
        Pre-layer normalization as proposed in the paper:
        "[ACL2018] The Best of Both Worlds: Combining Recent Advances in
        Neural Machine Translation"
        This will stabilize the training of Transformers.
        You may also refer to
        "[Arxiv2020] Understanding the Difficulty of Training Transformers"
    dtype
    """
    super().__init__()
    self._dtype = dtype
    self._pre_norm = pre_norm
    self._use_gated_activation = use_gated_activation
    self._kwargs = OrderedDict([
        ('units', units),
        ('hidden_size', hidden_size),
        ('activation_dropout', activation_dropout),
        ('activation', activation),
        ('dropout', dropout),
        ('normalization', normalization),
        ('layer_norm_eps', layer_norm_eps),
        ('pre_norm', pre_norm),
        ('dtype', self._dtype)
    ])
    self.dropout_layer = nn.Dropout(dropout)
    self.activation_dropout_layer = nn.Dropout(activation_dropout)
    self.ffn_1 = nn.Dense(units=hidden_size,
                          in_units=units,
                          flatten=False,
                          use_bias=use_bias,
                          weight_initializer=weight_initializer,
                          bias_initializer=bias_initializer,
                          dtype=dtype)
    if use_gated_activation:
        self.gated_ffn_1 = nn.Dense(units=hidden_size,
                                    in_units=units,
                                    flatten=False,
                                    use_bias=use_bias,
                                    weight_initializer=weight_initializer,
                                    bias_initializer=bias_initializer,
                                    dtype=dtype)
    self.activation = get_activation(activation)
    self.ffn_2 = nn.Dense(units=units,
                          in_units=hidden_size,
                          flatten=False,
                          use_bias=use_bias,
                          weight_initializer=weight_initializer,
                          bias_initializer=bias_initializer,
                          dtype=dtype)
    # TODO(sxjscience) We may need to set the dtype flag in LayerNorm, need to double check
    self.layer_norm = get_norm_layer(in_channels=units,
                                     normalization=normalization,
                                     epsilon=layer_norm_eps,
                                     **kwargs)
Parameters
----------
units
hidden_size
use_bias
activation_dropout
dropout
weight_initializer
bias_initializer
activation
use_gated_activation
normalization
    The type of normalization; see get_norm_layer, e.g., 'layer_norm' or 'no_norm'
layer_norm_eps
pre_norm
    Pre-layer normalization as proposed in the paper:
    "[ACL2018] The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation"
    This will stabilize the training of Transformers.
    You may also refer to "[Arxiv2020] Understanding the Difficulty of Training Transformers"
dtype
__init__
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def forward(self, data):
    """

    Parameters
    ----------
    data :
        Shape (B, seq_length, C_in)

    Returns
    -------
    out :
        Shape (B, seq_length, C_out)
    """
    residual = data
    if self._pre_norm:
        data = self.layer_norm(data)
    if self._use_gated_activation:
        gated_out = self.activation(self.gated_ffn_1(data))
        out = gated_out * self.ffn_1(data)
    else:
        out = self.activation(self.ffn_1(data))
    out = self.activation_dropout_layer(out)
    out = self.ffn_2(out)
    out = self.dropout_layer(out)
    out = out + residual
    if not self._pre_norm:
        out = self.layer_norm(out)
    return out
Parameters
----------
data :
    Shape (B, seq_length, C_in)

Returns
-------
out :
    Shape (B, seq_length, C_out)
forward
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def __init__(self, vocab_size: int, embed_size: int, units: int,
             cutoffs: Optional[Union[int, List]] = None,
             div_val: float = 1.0, dtype='float32',
             scaled=True,
             embedding_initializer: InitializerType = None,
             weight_initializer: InitializerType = None):
    """

    Parameters
    ----------
    vocab_size
        The size of the vocabulary
    embed_size
        The base size of the embedding vectors. The embedding size of each cluster will be
        [embed_size / div_val**0, embed_size / div_val**1, embed_size / div_val**2, ...]
    units
        The number of units after the mapping
    cutoffs
        The cutoffs to slice the vocab to multiple clusters. It should be a sorted list.
        Each value should lie between 1 and vocab_size - 1.
    div_val
        The base denominator for computing the size of the embedding vector in each cluster.
    dtype
        The data type of layer
    scaled
        Whether to scale the embedding by sqrt(units)
    embedding_initializer
        Initializer of the embedding vectors
    weight_initializer
        Initializer of projection layers
    """
    super().__init__()
    cutoffs = _fmt_and_check_cutoffs(cutoffs, vocab_size)
    if cutoffs is None:
        assert div_val == 1.0
    self._dtype = dtype
    self._kwargs = OrderedDict([
        ('cutoffs', cutoffs), ('vocab_size', vocab_size), ('embed_size', embed_size),
        ('units', units), ('div_val', div_val), ('dtype', dtype), ('scaled', scaled)
    ])
    self._vocab_size = vocab_size
    self._cutoffs = cutoffs
    self._units = units
    self._embed_size = embed_size
    self._div_val = div_val
    self._scaled = scaled
    if self._scaled:
        self._emb_scale = units**0.5
    if div_val == 1.0:
        self.embed0_weight = Parameter('embed0_weight',
                                       shape=(vocab_size, embed_size),
                                       init=embedding_initializer,
                                       allow_deferred_init=True)
        if units != embed_size:
            self.inter_proj0_weight = Parameter('inter_proj0_weight',
                                                shape=(embed_size, units),
                                                init=weight_initializer,
                                                allow_deferred_init=True)
        else:
            self.proj_layers = None
    else:
        self.proj_layers = nn.HybridSequential()
        for i, (l_idx, r_idx) in enumerate(zip([0] + cutoffs, cutoffs + [vocab_size])):
            inner_embed_size = int(embed_size / div_val**i)
            if inner_embed_size == 0:
                raise ValueError('div_val = {} is too large for the layer. Currently, the '
                                 'cutoffs are {} and the embed_size is {}. Using the '
                                 'div_val = {} will cause some clusters to have '
                                 'embed_size=0.'.format(div_val, cutoffs, embed_size,
                                                        div_val))
            setattr(
                self, 'embed{}_weight'.format(i),
                Parameter('embed{}_weight'.format(i),
                          shape=(r_idx - l_idx, inner_embed_size),
                          init=embedding_initializer,
                          allow_deferred_init=True))
            setattr(self, 'inter_proj{}_weight'.format(i),
                    Parameter('inter_proj{}_weight'.format(i),
                              shape=(inner_embed_size, units),
                              init=weight_initializer,
                              allow_deferred_init=True))
Parameters
----------
vocab_size
    The size of the vocabulary
embed_size
    The base size of the embedding vectors. The embedding size of each cluster will be
    [embed_size / div_val**0, embed_size / div_val**1, embed_size / div_val**2, ...]
units
    The number of units after the mapping
cutoffs
    The cutoffs to slice the vocab to multiple clusters. It should be a sorted list.
    Each value should lie between 1 and vocab_size - 1.
div_val
    The base denominator for computing the size of the embedding vector in each cluster.
dtype
    The data type of layer
scaled
    Whether to scale the embedding by sqrt(units)
embedding_initializer
    Initializer of the embedding vectors
weight_initializer
    Initializer of projection layers
__init__
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def forward(self, inp): # pylint: disable=arguments-differ """ Parameters ---------- inp Shape (...,) Returns ------- out Shape (..., units) """ if self._div_val == 1.0: emb = np.take(getattr(self, 'embed0_weight').data(), inp, axis=0) if self._units != self._embed_size: emb = np.dot(emb, getattr(self, 'inter_proj0_weight').data()) else: emb = None for i, (l_idx, r_idx) in enumerate(zip([0] + self._cutoffs, self._cutoffs + [self._vocab_size])): emb_i = np.take(getattr(self, 'embed{}_weight'.format(i)).data(), inp - l_idx, axis=0, mode='clip') emb_i = np.dot(emb_i, getattr(self, 'inter_proj{}_weight'.format(i)).data()) if emb is None: emb = emb_i else: emb = np.where(np.expand_dims((inp >= l_idx) * (inp < r_idx), axis=-1), emb_i, emb) if self._scaled: emb = emb * self._emb_scale return emb
Parameters ---------- inp Shape (...,) Returns ------- out Shape (..., units)
forward
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def __init__(self, vocab_size: int, embed_size: int, in_units: int,
             cutoffs: Optional[Union[int, List]] = None,
             div_val: float = 1.0,
             dtype='float32',
             use_bias=True,
             weight_initializer: InitializerType = None,
             bias_initializer: InitializerType = None):
    """

    Parameters
    ----------
    vocab_size
        Size of the vocabulary
    embed_size
        Base embedding size. The hidden state will first be projected to embed_size
        and then projected to vocab_size
    in_units
        The number of input units
    cutoffs
        The cutoff values
    div_val
        The base denominator for computing the size of the embedding vector in each cluster.
    dtype
        Data type
    use_bias
        Whether to use bias when computing the scores for the tokens
    weight_initializer
    bias_initializer
    """
    super().__init__()
    cutoffs = _fmt_and_check_cutoffs(cutoffs, vocab_size)
    if cutoffs is None:
        assert div_val == 1.0
    self._vocab_size = vocab_size
    self._embed_size = embed_size
    self._in_units = in_units
    self._cutoffs = cutoffs
    self._div_val = div_val
    if cutoffs is not None:
        self._num_tail_clusters = len(self._cutoffs)
    self._dtype = dtype
    self._kwargs = OrderedDict([
        ('cutoffs', cutoffs), ('vocab_size', vocab_size), ('embed_size', embed_size),
        ('in_units', in_units), ('div_val', div_val), ('dtype', dtype),
        ('use_bias', use_bias)
    ])
    if cutoffs is not None:
        self.tail_cluster_score_proj = nn.Dense(units=self._num_tail_clusters,
                                                in_units=embed_size,
                                                flatten=False,
                                                use_bias=use_bias,
                                                weight_initializer=weight_initializer,
                                                bias_initializer=bias_initializer)
    self.inter_proj_l = nn.HybridSequential()
    self.out_proj_l = nn.HybridSequential()
    if div_val == 1.0:
        if in_units != embed_size:
            self.inter_proj_l.add(nn.Dense(in_units=in_units,
                                           units=embed_size,
                                           flatten=False,
                                           use_bias=False,
                                           weight_initializer=weight_initializer,
                                           bias_initializer=bias_initializer))
        self.out_proj_l.add(nn.Dense(in_units=embed_size,
                                     units=vocab_size,
                                     flatten=False,
                                     use_bias=use_bias,
                                     weight_initializer=weight_initializer,
                                     bias_initializer=bias_initializer))
    else:
        for i, (l_idx, r_idx) in enumerate(zip([0] + self._cutoffs,
                                               self._cutoffs + [vocab_size])):
            ele_embed_size = int(embed_size / (div_val ** i))
            self.inter_proj_l.add(nn.Dense(in_units=in_units,
                                           units=ele_embed_size,
                                           flatten=False,
                                           use_bias=False,
                                           weight_initializer=weight_initializer,
                                           bias_initializer=bias_initializer))
            self.out_proj_l.add(nn.Dense(in_units=ele_embed_size,
                                         units=r_idx - l_idx,
                                         flatten=False,
                                         use_bias=use_bias,
                                         weight_initializer=weight_initializer,
                                         bias_initializer=bias_initializer))
Parameters
----------
vocab_size
    Size of the vocabulary
embed_size
    Base embedding size. The hidden state will first be projected to embed_size
    and then projected to vocab_size
in_units
    The number of input units
cutoffs
    The cutoff values
div_val
    The base denominator for computing the size of the embedding vector in each cluster.
dtype
    Data type
use_bias
    Whether to use bias when computing the scores for the tokens
weight_initializer
bias_initializer
__init__
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def get_logits(self, hidden):
    """Get all the logits.

    Parameters
    ----------
    hidden
        The hidden representation.
        Shape (..., in_units)

    Returns
    -------
    logits
        Shape (..., :math:`|V|`)
    """
    if self._cutoffs is None:
        if self._in_units != self._embed_size:
            hidden = self.inter_proj_l[0](hidden)
        logits = self.out_proj_l[0](hidden)
        return logits
    else:
        all_logits = []
        if self._div_val == 1.0:
            if self._in_units == self._embed_size:
                all_scores = self.out_proj_l[0](hidden)
                tail_cluster_scores = self.tail_cluster_score_proj(hidden)
            else:
                inter_hidden = self.inter_proj_l[0](hidden)
                all_scores = self.out_proj_l[0](inter_hidden)
                tail_cluster_scores = self.tail_cluster_score_proj(inter_hidden)
            all_scores_l = np.split(all_scores, self._cutoffs, axis=-1)
            head_scores = all_scores_l[0]
        else:
            inter_hidden = self.inter_proj_l[0](hidden)
            head_scores = self.out_proj_l[0](inter_hidden)
            tail_cluster_scores = self.tail_cluster_score_proj(inter_hidden)
        head_tail_cluster_logits = \
            npx.log_softmax(np.concatenate([head_scores, tail_cluster_scores], axis=-1),
                            axis=-1)
        head_logits, tail_cluster_logits = \
            np.split(head_tail_cluster_logits, [self._cutoffs[0]], axis=-1)
        tail_cluster_logits = np.split(tail_cluster_logits, self._num_tail_clusters,
                                       axis=-1)
        all_logits.append(head_logits)
        for i in range(1, len(self._cutoffs) + 1):
            if self._div_val == 1.0:
                ele_scores = all_scores_l[i]
            else:
                ele_scores = self.out_proj_l[i](self.inter_proj_l[i](hidden))
            ele_logits = npx.log_softmax(ele_scores, axis=-1)
            ele_logits = tail_cluster_logits[-i] + ele_logits
            all_logits.append(ele_logits)
        return np.concatenate(all_logits, axis=-1)
Get all the logits.

Parameters
----------
hidden
    The hidden representation.
    Shape (..., in_units)

Returns
-------
logits
    Shape (..., :math:`|V|`)
get_logits
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def forward(self, hidden, target):
    """

    Parameters
    ----------
    hidden
        The hidden representation
        Shape (..., in_units)
    target
        The target token indices
        Shape (...,)

    Returns
    -------
    sel_logits
        The output logits picked at the target indices, i.e., the score of each
        position's target label
        Shape (...,)
    """
    # TODO(sxjscience) The computation here can be greatly accelerated! Due to the
    #  missing feature of index_update, we are not able to do this here.
    logits = self.get_logits(hidden)
    sel_logits = npx.pick(logits, target, axis=-1)
    return sel_logits
Parameters
----------
hidden
    The hidden representation
    Shape (..., in_units)
target
    The target token indices
    Shape (...,)

Returns
-------
sel_logits
    The output logits picked at the target indices, i.e., the score of each
    position's target label
    Shape (...,)
forward
python
dmlc/gluon-nlp
src/gluonnlp/layers.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py
Apache-2.0
def forward(self, pred, label): """ Parameters ---------- pred : The predictions of the network. Shape (..., V) label : The labels. Shape (..., ) Returns ------- loss : Shape (..., ) """ if not self._from_logits: pred = npx.log_softmax(pred, axis=-1) log_likelihood = npx.pick(pred, label, axis=-1) all_scores = pred.sum(axis=-1) loss = - (1 - self._alpha) * log_likelihood\ - self._alpha / float(self._num_labels) * all_scores return loss
Parameters ---------- pred : The predictions of the network. Shape (..., V) label : The labels. Shape (..., ) Returns ------- loss : Shape (..., )
forward
python
dmlc/gluon-nlp
src/gluonnlp/loss.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/loss.py
Apache-2.0
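To see the label-smoothing formula concretely, here is a hedged NumPy re-implementation of the same loss; it is a checking aid under the document's definitions, not the library code:

import numpy as np

def smoothed_nll(pred_logits, label, alpha, num_labels):
    # log-softmax over the last axis
    z = pred_logits - pred_logits.max(axis=-1, keepdims=True)
    logp = z - np.log(np.exp(z).sum(axis=-1, keepdims=True))
    # log-likelihood of the target class
    ll = np.take_along_axis(logp, label[..., None], axis=-1).squeeze(-1)
    # loss = -(1 - alpha) * log p[label] - alpha / V * sum_v log p[v]
    return -(1 - alpha) * ll - alpha / num_labels * logp.sum(axis=-1)

logits = np.array([[2.0, 0.5, -1.0]])
label = np.array([0])
print(smoothed_nll(logits, label, alpha=0.1, num_labels=3))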
def select_vectors_by_position(data, positions): """Select each batch with the given positions. Once advanced indexing can be hybridized, we can revise the implementation. out[i, j, ...] = data[i, positions[i, j], ...] Parameters ---------- data Input tensor of contextualized token embeddings Shape (batch_size, seq_length, ...) positions Input tensor of the positions. Shape (batch_size, num_sel_positions). For each sample in the batch, the values in this tensor must not exceed the length of the sequence. Returns ------- out The selection result. Shape (batch_size, num_sel_positions, ...) """ # Here, we use gather_nd to select the output from data: # Need to compute # out[i, j, :] = in[i, masked_position[i, j], :] # Thus, construct a indices with shape [2, batch_size, num_masked_position], where # indices[0, i, j] = i # indices[1, i, j] = masked_position[i, j] # Then, out = gather_nd(in, indices) positions = positions.astype(np.int32) # batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...] batch_idx = np.expand_dims(npx.arange_like(positions, axis=0), axis=1).astype(np.int32) batch_idx = batch_idx + np.zeros_like(positions) indices = np.stack([batch_idx, positions]) # TODO(sxjscience) We can revise the implementation to advanced indexing # once the bug in MXNet is solved: # https://github.com/apache/incubator-mxnet/issues/18919 out = npx.gather_nd(data, indices) return out
Select each batch with the given positions. Once advanced indexing can be hybridized, we can revise the implementation. out[i, j, ...] = data[i, positions[i, j], ...] Parameters ---------- data Input tensor of contextualized token embeddings Shape (batch_size, seq_length, ...) positions Input tensor of the positions. Shape (batch_size, num_sel_positions). For each sample in the batch, the values in this tensor must not exceed the length of the sequence. Returns ------- out The selection result. Shape (batch_size, num_sel_positions, ...)
select_vectors_by_position
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
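A tiny end-to-end example for select_vectors_by_position (assuming MXNet numpy semantics and the function in scope, e.g. from gluonnlp.op):

import mxnet as mx
mx.npx.set_np()

data = mx.np.arange(12).reshape(2, 3, 2)      # (batch=2, seq_length=3, C=2)
positions = mx.np.array([[0, 2], [1, 1]])
out = select_vectors_by_position(data, positions)
# out[0] stacks data[0, 0] and data[0, 2]; out[1] repeats data[1, 1].
assert out.shape == (2, 2, 2)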
def add_vectors_by_position(data, increment, positions):
    """Scatter each batch with the given positions.

    data[i, positions[i, j], ...] += increment[i, j, ...]

    Parameters
    ----------
    data
        Input tensor of the array to be updated.
        Shape (batch_size, seq_length, ...)
    increment
        Input tensor of the increments.
        Shape (batch_size, num_disp_position, ...)
    positions
        Input tensor of the positions.
        Shape (batch_size, num_disp_position).
        For each sample in the batch, the values in this tensor must not exceed
        the length of the sequence.

    Returns
    -------
    out
        The updated result.
        Shape (batch_size, seq_length, ...)
    """
    # Here, we use index_add to disperse the output from data:
    # Need to compute
    #   out[i, masked_position[i, j], :] += increment[i, j, :]
    # Thus, construct an indices with shape [2, batch_size * num_masked_position], where
    #   indices[0, i * num_masked_position + j] = i
    #   indices[1, i * num_masked_position + j] = masked_position[i, j]
    # And convert the increment to the shape (batch_size * num_masked_position, ...)
    # Then, out = npx.index_add(data, indices, increment)
    positions = positions.astype(np.int32)
    # batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]
    batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),
                               axis=1).astype(np.int32)
    batch_idx = batch_idx + np.zeros_like(positions)
    indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])
    out = npx.index_add(data, indices, npx.reshape(increment, (-5, -4)))
    return out
Scatter each batch with the given positions.

data[i, positions[i, j], ...] += increment[i, j, ...]

Parameters
----------
data
    Input tensor of the array to be updated.
    Shape (batch_size, seq_length, ...)
increment
    Input tensor of the increments.
    Shape (batch_size, num_disp_position, ...)
positions
    Input tensor of the positions.
    Shape (batch_size, num_disp_position).
    For each sample in the batch, the values in this tensor must not exceed
    the length of the sequence.

Returns
-------
out
    The updated result.
    Shape (batch_size, seq_length, ...)
add_vectors_by_position
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
def update_vectors_by_position(data, val, positions):
    """
    Update each batch with the given positions. Considered as a reversed process of
    "select_vectors_by_position", this is an operator similar to "add_vectors_by_position"
    that updates the results instead of adding.

    data[i, positions[i, j], :] = val[i, j, :]

    Parameters
    ----------
    data
        Input tensor of the array to be updated.
        Shape (batch_size, seq_length, ...)
    val
        Input tensor of the new values.
        Shape (batch_size, num_disp_position, ...)
    positions
        Input tensor of the positions.
        Shape (batch_size, num_disp_position).
        For each sample in the batch, the values in this tensor must not exceed
        the length of the sequence.

    Returns
    -------
    out
        The updated result.
        Shape (batch_size, seq_length, ...)
    """
    positions = positions.astype(np.int32)
    # batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]
    batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),
                               axis=1).astype(np.int32)
    batch_idx = batch_idx + np.zeros_like(positions)
    indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])
    out = npx.index_update(data, indices, npx.reshape(val, (-5, -4)))
    return out
Update each batch with the given positions. Considered as a reversed process of
"select_vectors_by_position", this is an operator similar to "add_vectors_by_position"
that updates the results instead of adding.

data[i, positions[i, j], :] = val[i, j, :]

Parameters
----------
data
    Input tensor of the array to be updated.
    Shape (batch_size, seq_length, ...)
val
    Input tensor of the new values.
    Shape (batch_size, num_disp_position, ...)
positions
    Input tensor of the positions.
    Shape (batch_size, num_disp_position).
    For each sample in the batch, the values in this tensor must not exceed
    the length of the sequence.

Returns
-------
out
    The updated result.
    Shape (batch_size, seq_length, ...)
update_vectors_by_position
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
def gumbel_softmax(logits, temperature: float = 1.0, eps: float = 1E-10,
                   hard=True, use_np_gumbel: bool = True):
    r"""Perform the gumbel-softmax trick to generate differentiable one-hot vectors from the
    input logits.

    Here, the gumbel distribution is

    Gumbel(\alpha) = -log (-log U) + \log \alpha, in which U is the uniform(0, 1) distribution.

    A nice property of Gumbel is:

    \argmax({Gumbel(\alpha_i)}) \sim multinomial(\alpha_i)

    The Gumbel-Softmax trick is to use the softmax + straight-through estimator to produce
    one-hot vectors that represent the sampling result.

    References:

        1. https://en.wikipedia.org/wiki/Gumbel_distribution
        2. [ICLR2017] Categorical Reparameterization with Gumbel-Softmax

    Parameters
    ----------
    logits
        Logits. Shape (..., V)
    temperature
        The temperature that controls the sharpness of the output distribution. Lower
        temperatures push the samples closer to one-hot vectors.
    eps
        The eps for stability of gradient
    hard
        Whether to use the straight-through estimator to produce one-hot vectors.
    use_np_gumbel
        Whether to use the random.gumbel operator

    Returns
    -------
    ret
        The returned output. Shape (..., V)
    """
    # TODO(sxjscience) Investigate the impact of random.gumbel:
    #  Actually, random.gumbel has no eps and may have problems in calculating the gradient.
    if use_np_gumbel:
        gumbels = np.random.gumbel(np.zeros_like(logits))
    else:
        u = np.random.uniform(np.zeros_like(logits), 1)
        gumbels = -np.log(-np.log(u + eps) + eps)
    y = npx.softmax((gumbels + logits) / temperature, axis=-1)
    if hard:
        y_hard = np.max(y, axis=-1, keepdims=True) == y
        y_hard = npx.stop_gradient(y_hard - y) + y
        return y_hard
    else:
        return y
Perform the gumbel-softmax trick to generate differentiable one-hot vectors from the
input logits.

Here, the gumbel distribution is

Gumbel(\alpha) = -log (-log U) + \log \alpha, in which U is the uniform(0, 1) distribution.

A nice property of Gumbel is:

\argmax({Gumbel(\alpha_i)}) \sim multinomial(\alpha_i)

The Gumbel-Softmax trick is to use the softmax + straight-through estimator to produce
one-hot vectors that represent the sampling result.

References:

    1. https://en.wikipedia.org/wiki/Gumbel_distribution
    2. [ICLR2017] Categorical Reparameterization with Gumbel-Softmax

Parameters
----------
logits
    Logits. Shape (..., V)
temperature
    The temperature that controls the sharpness of the output distribution. Lower
    temperatures push the samples closer to one-hot vectors.
eps
    The eps for stability of gradient
hard
    Whether to use the straight-through estimator to produce one-hot vectors.
use_np_gumbel
    Whether to use the random.gumbel operator

Returns
-------
ret
    The returned output. Shape (..., V)
gumbel_softmax
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
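Usage sketch for gumbel_softmax (assuming it is in scope, e.g. from gluonnlp.op, with MXNet numpy semantics enabled); the output is stochastic, so only its structure is asserted:

import mxnet as mx
mx.npx.set_np()

logits = mx.np.array([[1.0, 2.0, 0.5]])
y = gumbel_softmax(logits, temperature=0.5, hard=True)
# Forward pass: `y` is exactly one-hot (a sample from the perturbed logits).
# Backward pass: gradients flow through the underlying softmax, which is
# the straight-through estimator described above.
assert y.shape == (1, 3)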
def trunc_gumbel(logits, truncation):
    r"""Sample from the TruncGumbel distribution.

    TruncGumbel(\alpha, truncation) is the Gumbel(\alpha) distribution truncated from
    above, i.e., conditioned so that all samples are smaller than `truncation`.

    To sample from the distribution, we can use the CDF inversion technique.

    References:

        1. [NIPS2014] A* Sampling, https://papers.nips.cc/paper/5449-a-sampling.pdf
        2. https://cmaddis.github.io/gumbel-machinery

    Parameters
    ----------
    logits
        The logits. Shape (...,)
    truncation
        The truncation. Shape (...,)

    Returns
    -------
    samples
        Samples from the TruncGumbel(logits, truncation)
        Shape (...,)
    """
    gumbels = np.random.gumbel(np.zeros_like(logits)) + logits
    return -np.log(np.exp(-gumbels) + np.exp(-truncation))
Sample from the TruncGumbel distribution.

TruncGumbel(\alpha, truncation) is the Gumbel(\alpha) distribution truncated from
above, i.e., conditioned so that all samples are smaller than `truncation`.

To sample from the distribution, we can use the CDF inversion technique.

References:

    1. [NIPS2014] A* Sampling, https://papers.nips.cc/paper/5449-a-sampling.pdf
    2. https://cmaddis.github.io/gumbel-machinery

Parameters
----------
logits
    The logits. Shape (...,)
truncation
    The truncation. Shape (...,)

Returns
-------
samples
    Samples from the TruncGumbel(logits, truncation)
    Shape (...,)
trunc_gumbel
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
def relative_position_bucket(relative_position,
                             bidirectional: bool = True,
                             num_buckets: int = 32,
                             max_distance: int = 128):
    """Map the relative position to buckets.

    The implementation is consistent with that in
    [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
    where relative position is defined as `mem_i - query_j`. Thus, a positive value indicates
    that the memory slot is in a later timestamp than the query slot.

    After handling the bidirectional case (see below), the implementation uses the first half
    of buckets to store exact differences and the second half to store the differences after
    a logarithmic transformation.

    Parameters
    ----------
    relative_position
        Shape (...,)
    bidirectional
        Whether we are dealing with bidirectional attention.
        If it's bidirectional, negative shifts are mapped to [0, num_buckets // 2),
        and positive shifts are mapped to [num_buckets // 2, num_buckets).
    num_buckets
        The number of buckets.
    max_distance
        Maximum distance. Positions that fall outside of 'max_distance' are all mapped to
        the last bucket.

    Returns
    -------
    buckets
        Shape (...,).
        It has the same shape as the `relative_position`. It will have int32 type.
    """
    ret = 0
    relative_position = -relative_position
    if bidirectional:
        assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets ' \
                                     'must be divisible by 2.'
        num_buckets //= 2
        ret = ret + (relative_position < 0).astype(np.int32) * num_buckets
        relative_position = np.abs(relative_position)
    else:
        # Clip all the negative values to 0
        relative_position = np.clip(relative_position, a_min=0, a_max=None)
    # Now, the relative_position is in the range [0, inf)
    # Half of the buckets deal with the exact increments,
    # i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact
    # The other half of the buckets are for logarithmically bigger bins in positions up to
    # max_distance
    val_if_large = max_exact + (
            np.log(relative_position.astype(np.float32) / max_exact)
            / math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)
    val_if_large = np.minimum(val_if_large, num_buckets - 1)
    ret = ret + np.where(is_small, relative_position, val_if_large)
    return ret
Map the relative position to buckets.

The implementation is consistent with that in
[mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
where relative position is defined as `mem_i - query_j`. Thus, a positive value indicates
that the memory slot is in a later timestamp than the query slot.

After handling the bidirectional case (see below), the implementation uses the first half
of buckets to store exact differences and the second half to store the differences after
a logarithmic transformation.

Parameters
----------
relative_position
    Shape (...,)
bidirectional
    Whether we are dealing with bidirectional attention.
    If it's bidirectional, negative shifts are mapped to [0, num_buckets // 2),
    and positive shifts are mapped to [num_buckets // 2, num_buckets).
num_buckets
    The number of buckets.
max_distance
    Maximum distance. Positions that fall outside of 'max_distance' are all mapped to
    the last bucket.

Returns
-------
buckets
    Shape (...,).
    It has the same shape as the `relative_position`. It will have int32 type.
relative_position_bucket
python
dmlc/gluon-nlp
src/gluonnlp/op.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py
Apache-2.0
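With the defaults above (bidirectional=True, num_buckets=32, max_distance=128), each direction gets 16 buckets: shifts with |shift| < 8 land in exact buckets, larger shifts are binned logarithmically, and anything at or beyond max_distance shares the final bucket of its direction. A quick probe, assuming an MXNet numpy context and the function in scope:

import mxnet as mx
mx.npx.set_np()

shifts = mx.np.array([0, 3, 7, 8, 20, 200])
buckets = relative_position_bucket(shifts, bidirectional=True,
                                   num_buckets=32, max_distance=128)
# 0, 3 and 7 fall in the exact range; 8 and 20 in the logarithmic range;
# 200 (beyond max_distance) is clipped into the final bucket.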
def _expand_to_beam_size(data, beam_size, batch_size, state_batch_axis=None):
    """Tile all the states to have batch_size * beam_size on the batch axis.

    Parameters
    ----------
    data : A single mx.np.ndarray or nested container with mx.np.ndarray
        Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
        or match the layout described by state_batch_axis when it's not None.
    beam_size : int
        Beam size
    batch_size : int
        Batch size
    state_batch_axis : Nested structure of dictionary, default None.
        Descriptors for states, usually from decoder's ``state_batch_axis()``.
        When None, this method assumes that the batch axis is the first dimension.

    Returns
    -------
    new_states : Object that contains mx.np.ndarray
        Each mx.np.ndarray should have shape batch_size * beam_size on the batch axis.
    """
    if isinstance(data, (list, tuple)):
        if state_batch_axis is not None:
            # TODO(sxjscience) Better Exception Handling
            return [_expand_to_beam_size(d, beam_size, batch_size, batch_axis)
                    for d, batch_axis in zip(data, state_batch_axis)]
        else:
            return [_expand_to_beam_size(d, beam_size, batch_size, None) for d in data]
    elif isinstance(data, dict):
        if state_batch_axis is not None:
            return {k: _expand_to_beam_size(v, beam_size, batch_size, state_batch_axis[k])
                    for k, v in data.items()}
        else:
            return {k: _expand_to_beam_size(v, beam_size, batch_size, None)
                    for k, v in data.items()}
    elif isinstance(data, mx.np.ndarray):
        if state_batch_axis is None:
            batch_axis = 0
        else:
            batch_axis = state_batch_axis
        if data.shape[batch_axis] != batch_size:
            raise ValueError('The batch size of all the inner elements in states must be '
                             '{}, Found shape={}, inferred batch axis={}'
                             .format(batch_size, data.shape, batch_axis))
        new_shape = list(data.shape)
        new_shape[batch_axis] = batch_size * beam_size
        new_shape = tuple(new_shape)
        bcast_new_shape = new_shape[:batch_axis] + (batch_size, beam_size) \
            + new_shape[(batch_axis + 1):]
        return mx.np.expand_dims(data, batch_axis + 1).broadcast_to(bcast_new_shape)\
            .reshape(new_shape)
    elif data is None:
        return None
    else:
        raise NotImplementedError
Tile all the states to have batch_size * beam_size on the batch axis.

Parameters
----------
data : A single mx.np.ndarray or nested container with mx.np.ndarray
    Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
    or match the layout described by state_batch_axis when it's not None.
beam_size : int
    Beam size
batch_size : int
    Batch size
state_batch_axis : Nested structure of dictionary, default None.
    Descriptors for states, usually from decoder's ``state_batch_axis()``.
    When None, this method assumes that the batch axis is the first dimension.

Returns
-------
new_states : Object that contains mx.np.ndarray
    Each mx.np.ndarray should have shape batch_size * beam_size on the batch axis.
_expand_to_beam_size
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
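For example (the dict key and sizes are arbitrary):

import mxnet as mx
mx.npx.set_np()

states = {'hidden': mx.np.arange(10).reshape(2, 5)}   # batch_size = 2
tiled = _expand_to_beam_size(states, beam_size=3, batch_size=2)
# Each batch row is repeated beam_size times along the batch axis:
assert tiled['hidden'].shape == (6, 5)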
def _choose_states(states, indices, state_batch_axis=None):
    """

    Parameters
    ----------
    states : Object that contains mx.np.ndarray
    indices : mx.np.ndarray
        Indices of the states to take.
        Shape (N,).
    state_batch_axis
        Descriptors for states, generated from the decoder's ``state_batch_axis``.
        When None, this method assumes that the batch axis is the first dimension.

    Returns
    -------
    new_states : Object that contains mx.np.ndarray
        Each mx.np.ndarray should have shape (..., N, ...).
    """
    if isinstance(states, (list, tuple)):
        if state_batch_axis is not None:
            return [_choose_states(d, indices, b_axis)
                    for d, b_axis in zip(states, state_batch_axis)]
        else:
            return [_choose_states(d, indices, None) for d in states]
    elif isinstance(states, dict):
        if state_batch_axis is not None:
            return {k: _choose_states(v, indices, state_batch_axis[k])
                    for k, v in states.items()}
        else:
            return {k: _choose_states(v, indices, None) for k, v in states.items()}
    elif isinstance(states, mx.np.ndarray):
        if state_batch_axis is None:
            batch_axis = 0
        else:
            batch_axis = state_batch_axis
        states = mx.np.take(states, indices, axis=batch_axis)
        return states
    else:
        raise TypeError('The type of the states is not supported, '
                        'type(states) = {}'.format(type(states)))
Parameters
----------
states : Object that contains mx.np.ndarray
indices : mx.np.ndarray
    Indices of the states to take.
    Shape (N,).
state_batch_axis
    Descriptors for states, generated from the decoder's ``state_batch_axis``.
    When None, this method assumes that the batch axis is the first dimension.

Returns
-------
new_states : Object that contains mx.np.ndarray
    Each mx.np.ndarray should have shape (..., N, ...).
_choose_states
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
def __init__(self, beam_size, vocab_size, eos_id, scorer, state_batch_axis,
             stochastic=False):
    """

    Parameters
    ----------
    beam_size : int
    vocab_size : int
    eos_id : int
    scorer : BeamSearchScorer
    state_batch_axis :
    stochastic : bool
    """
    super().__init__()
    self._beam_size = beam_size
    self._vocab_size = vocab_size
    self._eos_id = eos_id
    self._scorer = scorer
    self._state_batch_axis = state_batch_axis
    self.stochastic = stochastic
    assert eos_id is None or eos_id >= 0, \
        'eos_id cannot be negative! Received eos_id={}'.format(eos_id)
Parameters
----------
beam_size : int
vocab_size : int
eos_id : int
scorer : BeamSearchScorer
state_batch_axis :
stochastic : bool
__init__
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
def gumbel_with_maximum(self, phi, T, dim=-1):
    """Sample Gumbel perturbations of `phi` whose maximum along `dim` equals `T`.

    Parameters
    ----------
    phi : mx.np.ndarray
        Shape (batch_size, beam_size, L).
    T : mx.np.ndarray
        The previous scores. Shape (batch_size, beam_size)
    dim
        The axis along which the maximum is taken
    """
    g_phi = phi + mx.np.random.gumbel(mx.np.zeros_like(phi))
    Z = g_phi.max(dim)
    g = self.shift_gumbel_maximum(g_phi, T, dim, Z=Z)
    return g
Sample Gumbel perturbations of `phi` whose maximum along `dim` equals `T`.

Parameters
----------
phi : mx.np.ndarray
    Shape (batch_size, beam_size, L).
T : mx.np.ndarray
    The previous scores. Shape (batch_size, beam_size)
dim
    The axis along which the maximum is taken
gumbel_with_maximum
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
def shift_gumbel_maximum(self, g_phi, T, axis=-1, Z=None):
    """

    Parameters
    ----------
    g_phi : mx.np.ndarray
        Shape (batch_size, beam_size, L).
    T : mx.np.ndarray
        The previous scores. Shape (batch_size, beam_size)
    axis
        The axis along which the maximum is taken
    Z
        The maximum of `g_phi` along `axis`. It is computed from `g_phi`
        when not given.
    """
    if Z is None:
        Z = g_phi.max(axis=axis)
    T_ = mx.npx.reshape(T, (-4, 1))
    Z_ = mx.npx.reshape(Z, (-4, 1))
    u = T_ - g_phi + mx.np.log1p(-mx.np.exp(g_phi - Z_) + 1e-5)
    return T_ - mx.npx.relu(u) - mx.np.log1p(mx.np.exp(-mx.np.abs(u)))
Parameters
----------
g_phi : mx.np.ndarray
    Shape (batch_size, beam_size, L).
T : mx.np.ndarray
    The previous scores. Shape (batch_size, beam_size)
axis
    The axis along which the maximum is taken
Z
    The maximum of `g_phi` along `axis`. It is computed from `g_phi`
    when not given.
shift_gumbel_maximum
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
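The last line of shift_gumbel_maximum is a numerically stable evaluation of T - log(1 + exp(u)), i.e., T minus a softplus, using the identity log(1 + e^u) = relu(u) + log1p(e^{-|u|}); this shifting trick appears in the stochastic beam search literature (e.g., Kool et al., "Stochastic Beams and Where to Find Them"). A NumPy check of the identity:

import numpy as np

def softplus_stable(u):
    # log(1 + exp(u)) without overflow: relu(u) + log1p(exp(-|u|))
    return np.maximum(u, 0.0) + np.log1p(np.exp(-np.abs(u)))

u = np.array([-30.0, 0.0, 30.0])
# The naive form np.log1p(np.exp(u)) overflows in float32 for large u,
# while the stable form stays finite and agrees where both are defined.
assert np.allclose(softplus_stable(u), np.log1p(np.exp(u)))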
def forward(self, samples, valid_length, outputs, scores, step, beam_alive_mask,   # pylint: disable=arguments-differ
            states, batch_shift):
    """

    Parameters
    ----------
    samples : mx.np.ndarray
        The current samples generated by beam search.
        Shape (batch_size, beam_size, L).
    valid_length : mx.np.ndarray
        The current valid lengths of the samples
    outputs : mx.np.ndarray
        Outputs from predictor. If from_logits was set to True in scorer, then it's the
        log probability of the current step. Else, it's the unnormalized outputs before
        softmax or log_softmax.
        Shape (batch_size * beam_size, V).
    scores : mx.np.ndarray
        The previous scores. Shape (batch_size, beam_size)
    step : mx.np.ndarray
        The current step for doing beam search. Begins from 1. Shape ()
    beam_alive_mask : mx.np.ndarray
        Shape (batch_size, beam_size)
    states : nested structure of mx.np.ndarray
        Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
        or match the layout described by state_batch_axis when it's not None.
    batch_shift : mx.np.ndarray
        Contains [0, beam_size, 2 * beam_size, ..., (batch_size - 1) * beam_size].
        Shape (batch_size,)

    Returns
    -------
    new_samples : mx.np.ndarray
        The updated samples.
        Shape (batch_size, beam_size, L + 1)
    new_valid_length : mx.np.ndarray
        Valid lengths of the samples. Shape (batch_size, beam_size)
    new_scores : mx.np.ndarray
        Shape (batch_size, beam_size)
    chosen_word_ids : mx.np.ndarray
        The chosen word ids of the step. Shape (batch_size, beam_size). If it's negative,
        no word will be appended to the beam.
    beam_alive_mask : mx.np.ndarray
        Shape (batch_size, beam_size)
    new_states : nested structure of mx.np.ndarray
        Inner mx.np.ndarrays have shape (batch_size * beam_size, ...)
    """
    beam_size = self._beam_size
    vocab_size = self._vocab_size
    beam_alive_mask_bcast = mx.np.expand_dims(beam_alive_mask, axis=2)
    candidate_scores = self._scorer(mx.npx.reshape(outputs, (-6, -1, beam_size, -2)),
                                    scores, step)
    if self.stochastic:
        if step == 1:
            candidate_scores_gumbel \
                = candidate_scores[:1] \
                + mx.np.random.gumbel(mx.np.zeros_like(candidate_scores[:1]))
            candidate_scores_residual = candidate_scores[1:]
            candidate_scores = mx.np.concatenate((candidate_scores_gumbel,
                                                  candidate_scores_residual), axis=0)
        else:
            candidate_scores = self.gumbel_with_maximum(candidate_scores, scores, -1)
    # Concat the candidate scores and the scores of the finished beams
    # The resulting candidate score will have shape (batch_size, beam_size * |V| + beam_size)
    candidate_scores = mx.np.where(beam_alive_mask_bcast,
                                   candidate_scores,
                                   mx.np.full_like(candidate_scores,
                                                   LARGE_NEGATIVE_FLOAT))
    finished_scores = mx.np.where(beam_alive_mask,
                                  mx.np.full_like(scores, LARGE_NEGATIVE_FLOAT),
                                  scores)
    candidate_scores = mx.np.concatenate([mx.npx.reshape(candidate_scores, (-2, -1)),
                                          finished_scores], axis=1)
    # Get the top K scores
    # new_scores and indices will have shape (batch_size, beam_size)
    new_scores, indices = mx.npx.topk(candidate_scores, axis=1, k=beam_size,
                                      ret_typ='both')
    indices = indices.astype(mx.np.int32)
    use_prev = (indices >= (beam_size * vocab_size)).astype(mx.np.int32)
    chosen_word_ids = mx.np.mod(indices, vocab_size)
    beam_ids = mx.np.where(use_prev,
                           indices - beam_size * vocab_size,
                           mx.np.floor(indices / vocab_size).astype(mx.np.int32))
    batch_beam_indices = beam_ids + mx.np.expand_dims(batch_shift, axis=1)
    chosen_word_ids = mx.np.where(use_prev,
                                  - mx.np.ones_like(indices),
                                  chosen_word_ids)
    # Update the samples and valid_length
    # TODO(sxjscience) The current implementation is quite tricky.
    #  We should wait for hybridizable advanced indexing to avoid this
    selected_samples = mx.np.take(mx.npx.reshape(samples, (-5, -2)),
                                  batch_beam_indices.reshape((-1,)), axis=0)
    new_samples = mx.npx.reshape(mx.np.concatenate([selected_samples,
                                                    chosen_word_ids.reshape((-1, 1))],
                                                   axis=1),
                                 (-6, -1, beam_size, -2))
    new_valid_length = mx.np.take(valid_length.reshape((-1,)),
                                  batch_beam_indices.reshape((-1,)),
                                  axis=0).reshape((-1, beam_size)) + 1 - use_prev
    # Update the states
    new_states = _choose_states(states, batch_beam_indices.reshape((-1,)),
                                self._state_batch_axis)
    # Update the alive mask.
    beam_alive_mask = mx.np.take(beam_alive_mask.reshape((-1,)),
                                 batch_beam_indices.reshape((-1,)),
                                 axis=0).reshape((-1, beam_size))
    if self._eos_id is not None:
        beam_alive_mask = beam_alive_mask \
            * (chosen_word_ids != self._eos_id).astype(mx.np.float32)
    return new_samples, new_valid_length, new_scores, chosen_word_ids, \
           beam_alive_mask, new_states
Parameters
----------
samples : mx.np.ndarray
    The current samples generated by beam search.
    Shape (batch_size, beam_size, L).
valid_length : mx.np.ndarray
    The current valid lengths of the samples
outputs : mx.np.ndarray
    Outputs from predictor. If from_logits was set to True in scorer, then it's the
    log probability of the current step. Else, it's the unnormalized outputs before
    softmax or log_softmax.
    Shape (batch_size * beam_size, V).
scores : mx.np.ndarray
    The previous scores. Shape (batch_size, beam_size)
step : mx.np.ndarray
    The current step for doing beam search. Begins from 1. Shape ()
beam_alive_mask : mx.np.ndarray
    Shape (batch_size, beam_size)
states : nested structure of mx.np.ndarray
    Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
    or match the layout described by state_batch_axis when it's not None.
batch_shift : mx.np.ndarray
    Contains [0, beam_size, 2 * beam_size, ..., (batch_size - 1) * beam_size].
    Shape (batch_size,)

Returns
-------
new_samples : mx.np.ndarray
    The updated samples.
    Shape (batch_size, beam_size, L + 1)
new_valid_length : mx.np.ndarray
    Valid lengths of the samples. Shape (batch_size, beam_size)
new_scores : mx.np.ndarray
    Shape (batch_size, beam_size)
chosen_word_ids : mx.np.ndarray
    The chosen word ids of the step. Shape (batch_size, beam_size). If it's negative,
    no word will be appended to the beam.
beam_alive_mask : mx.np.ndarray
    Shape (batch_size, beam_size)
new_states : nested structure of mx.np.ndarray
    Inner mx.np.ndarrays have shape (batch_size * beam_size, ...)
forward
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
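The candidate-selection step above can be illustrated in plain NumPy. The following sketch is not part of the source; it mirrors the core bookkeeping: alive beams expand their scores over the vocabulary, finished beams compete with a single frozen score, and a single top-k jointly picks the surviving beam and the appended word, with indices past beam_size * vocab_size meaning "keep the finished beam".

import numpy as np

batch_size, beam_size, vocab_size = 1, 2, 4
LARGE_NEGATIVE = -1e18

scores = np.array([[0.0, -0.5]])                     # (batch_size, beam_size)
log_probs = np.log(np.array([[[0.1, 0.2, 0.3, 0.4],  # (batch, beam, vocab)
                              [0.4, 0.3, 0.2, 0.1]]]))
beam_alive = np.array([[True, True]])

# Alive beams spread their score over the vocabulary; dead beams are masked out.
candidate = np.where(beam_alive[..., None], scores[..., None] + log_probs,
                     LARGE_NEGATIVE)
# Finished beams keep their old score as one extra candidate per beam.
finished = np.where(beam_alive, LARGE_NEGATIVE, scores)
flat = np.concatenate([candidate.reshape(batch_size, -1), finished], axis=1)

# One top-k over beam_size * vocab_size + beam_size candidates.
top = np.argsort(flat, axis=1)[:, ::-1][:, :beam_size]
use_prev = top >= beam_size * vocab_size             # "keep the finished beam"
beam_ids = np.where(use_prev, top - beam_size * vocab_size, top // vocab_size)
word_ids = np.where(use_prev, -1, top % vocab_size)  # -1: nothing is appended
print(beam_ids, word_ids)                            # [[0 0]] [[3 2]]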
def forward(self, inputs, states, src_seq_lengths=None):
    """Sample by beam search.

    Parameters
    ----------
    inputs : mx.np.ndarray
        The initial input of the decoder. Shape is (batch_size,).
    states : Object that contains mx.np.ndarrays
        The initial states of the decoder.
    src_seq_lengths : mx.np.ndarray
        The source sequence lengths. Shape is (batch_size,).

    Returns
    -------
    samples : mx.np.ndarray
        Samples drawn by beam search. Shape (batch_size, beam_size, length). DType is int32.
    scores : mx.np.ndarray
        Scores of the samples. Shape (batch_size, beam_size). We make sure that
        scores[i, :] are in descending order.
    valid_length : mx.np.ndarray
        The valid length of the samples. Shape (batch_size, beam_size). DType is int32.
    """
    ctx = inputs.ctx
    batch_size = inputs.shape[self._data_batch_axis]
    beam_size = self._beam_size
    if src_seq_lengths is not None:
        max_src_sequence_length = int(src_seq_lengths.asnumpy().max())
        max_length = max(self._min_length, max_src_sequence_length * self._max_length_a
                         + self._max_length_b)
    else:
        if self._max_length_a != 0:
            raise ValueError('If src_seq_lengths is not given, max_length_a must be 0!'
                             ' Received {}'.format(self._max_length_a))
        max_length = max(self._min_length, self._max_length_b)
    # Tile the states and inputs to have shape (batch_size * beam_size, ...)
    states = _expand_to_beam_size(states, beam_size=beam_size, batch_size=batch_size,
                                  state_batch_axis=self._state_batch_axis)
    step_input = _expand_to_beam_size(inputs, beam_size=beam_size,
                                      batch_size=batch_size,
                                      state_batch_axis=self._data_batch_axis).astype(mx.np.int32)
    # All beams are initialized to alive
    # Generated samples are initialized to be the inputs
    # All beams have -inf scores except for the first beam, whose score is set to zero.
    # Valid length is initialized to be 1
    beam_alive_mask = mx.np.ones(shape=(batch_size, beam_size), ctx=ctx,
                                 dtype=mx.np.float32)
    valid_length = mx.np.ones(shape=(batch_size, beam_size), ctx=ctx, dtype=mx.np.int32)
    scores = mx.np.zeros(shape=(batch_size, beam_size), ctx=ctx)
    if beam_size > 1:
        scores[:, 1:beam_size] = LARGE_NEGATIVE_FLOAT
    samples = step_input.reshape((batch_size, beam_size, -1))
    batch_shift = mx.np.arange(0, batch_size * beam_size, beam_size, ctx=ctx,
                               dtype=mx.np.int32)
    step = mx.np.array(0, ctx=ctx, dtype=mx.np.float32)
    for i in range(max_length):
        log_probs, new_states = self._decoder(step_input, states)
        assert log_probs.shape[1] == self._vocab_size
        step = step + 1
        samples, valid_length, scores, chosen_word_ids, beam_alive_mask, states = \
            self._updater(samples, valid_length, log_probs, scores, step,
                          beam_alive_mask, new_states, batch_shift)
        step_input = mx.npx.relu(chosen_word_ids).reshape((-1,))
        if self._early_return:
            if mx.np.sum(beam_alive_mask).asnumpy() == 0:
                return samples, scores, valid_length
    beam_alive_mask = beam_alive_mask.astype(mx.np.int32)
    if self._eos_id is not None:
        final_word = mx.np.where(beam_alive_mask,
                                 mx.np.full((batch_size, beam_size), self._eos_id,
                                            ctx=ctx, dtype=mx.np.int32),
                                 mx.np.full((batch_size, beam_size), -1,
                                            ctx=ctx, dtype=mx.np.int32))
        samples = mx.np.concatenate([samples,
                                     final_word.reshape((final_word.shape[0],
                                                         final_word.shape[1], 1))],
                                    axis=2)
        valid_length = valid_length + beam_alive_mask
    return samples, scores, valid_length
Sample by beam search.

Parameters
----------
inputs : mx.np.ndarray
    The initial input of the decoder. Shape is (batch_size,).
states : Object that contains mx.np.ndarrays
    The initial states of the decoder.
src_seq_lengths : mx.np.ndarray
    The source sequence lengths. Shape is (batch_size,).

Returns
-------
samples : mx.np.ndarray
    Samples drawn by beam search. Shape (batch_size, beam_size, length). DType is int32.
scores : mx.np.ndarray
    Scores of the samples. Shape (batch_size, beam_size). We make sure that scores[i, :] are in descending order.
valid_length : mx.np.ndarray
    The valid length of the samples. Shape (batch_size, beam_size). DType is int32.
forward
python
dmlc/gluon-nlp
src/gluonnlp/sequence_sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py
Apache-2.0
def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, use_shared_mem, dtype, round_to=None):
    """Inner implementation of the Pad batchify

    Parameters
    ----------
    arrs : list
    pad_axis : int
    pad_val : number
    use_shared_mem : bool, default False
    dtype :
        The dtype of the output. Defaults to the dtype of the input arrays.
    round_to : int

    Returns
    -------
    ret : NDArray
        The padded and stacked batch.
    """
    if isinstance(arrs[0], mx.nd.NDArray):
        dtype = dtype or arrs[0].dtype
        arrs = [arr.asnumpy() for arr in arrs]
    elif not isinstance(arrs[0], np.ndarray):
        arrs = [np.asarray(ele) for ele in arrs]
    else:
        dtype = dtype or arrs[0].dtype

    original_length = [ele.shape[pad_axis] for ele in arrs]
    max_size = max(original_length)
    if round_to is not None:
        max_size = round_to * math.ceil(max_size / round_to)

    ret_shape = list(arrs[0].shape)
    ret_shape[pad_axis] = max_size
    ret_shape = (len(arrs), ) + tuple(ret_shape)

    ret = np.full(shape=ret_shape, fill_value=pad_val, dtype=dtype)
    for i, arr in enumerate(arrs):
        if arr.shape[pad_axis] == max_size:
            ret[i] = arr
        else:
            slices = [slice(None) for _ in range(arr.ndim)]
            slices[pad_axis] = slice(0, arr.shape[pad_axis])
            if slices[pad_axis].start != slices[pad_axis].stop:
                slices = [slice(i, i + 1)] + slices
                ret[tuple(slices)] = arr

    # Use shared memory so the batch can be passed to other processes cheaply.
    ctx = mx.Context('cpu_shared', 0) if use_shared_mem else mx.cpu()
    if is_np_array():
        ret = mx.np.array(ret, ctx=ctx, dtype=dtype)
    else:
        ret = mx.nd.array(ret, ctx=ctx, dtype=dtype)
    return ret
Inner implementation of the Pad batchify

Parameters
----------
arrs : list
pad_axis : int
pad_val : number
use_shared_mem : bool, default False
dtype :
    The dtype of the output. Defaults to the dtype of the input arrays.
round_to : int

Returns
-------
ret : NDArray
    The padded and stacked batch.
_pad_arrs_to_max_length
python
dmlc/gluon-nlp
src/gluonnlp/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/batchify.py
Apache-2.0
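A short usage sketch for the Pad batchify documented above. The constructor keywords (val, axis, round_to) are assumptions inferred from the attributes _val, _axis and _round_to used in __call__:

import numpy as np
from gluonnlp.data.batchify import Pad

pad = Pad(val=0)
batch = pad([np.array([1, 2]), np.array([3, 4, 5, 6]), np.array([7])])
print(batch.shape)  # (3, 4): padded to the longest sequence with 0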
def __call__(self, data):
    """Batchify the input data.

    The input can be a list of numpy.ndarray, a list of numbers or a list of
    mxnet.nd.NDArray. Inputting mxnet.nd.NDArray is discouraged as each array
    needs to be converted to numpy for efficient padding.

    The arrays will be padded to the largest dimension at `axis` and then
    stacked to form the final output. In addition, the function will output
    the original dimensions at the `axis` if ret_length is turned on.

    Parameters
    ----------
    data : List[np.ndarray] or List[List[dtype]] or List[mx.nd.NDArray]
        List of samples to pad and stack.

    Returns
    -------
    batch_data: NDArray
        Data in the minibatch. Shape is (N, ...)
    """
    if isinstance(data[0], mx.nd.NDArray) and not self._warned:
        self._warned = True
        # TODO(sxjscience) Investigate the warning
        warnings.warn(
            'Using Pad with NDArrays is discouraged for speed reasons. '
            'Instead you should pad your data while it is still a list '
            'and before converting to an NDArray. '
            'Alternatively you can consider inputting a numpy.ndarray.')
    if isinstance(data[0], (mx.nd.NDArray, np.ndarray, list)):
        padded_arr = _pad_arrs_to_max_length(data, self._axis, self._val, False,
                                             self._dtype, round_to=self._round_to)
        return padded_arr
    else:
        raise NotImplementedError
Batchify the input data.

The input can be a list of numpy.ndarray, a list of numbers or a list of mxnet.nd.NDArray. Inputting mxnet.nd.NDArray is discouraged as each array needs to be converted to numpy for efficient padding.

The arrays will be padded to the largest dimension at `axis` and then stacked to form the final output. In addition, the function will output the original dimensions at the `axis` if ret_length is turned on.

Parameters
----------
data : List[np.ndarray] or List[List[dtype]] or List[mx.nd.NDArray]
    List of samples to pad and stack.

Returns
-------
batch_data: NDArray
    Data in the minibatch. Shape is (N, ...)
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/batchify.py
Apache-2.0
def __call__(self, data):
    """Batchify the input data.

    Parameters
    ----------
    data : list
        The samples to batchify. Each sample should contain N attributes.

    Returns
    -------
    ret : tuple
        A tuple of length N. Contains the batchified result of each attribute in the input.
    """
    assert len(data[0]) == len(self._fn), \
        'Each data sample should contain {} attributes.'.format(len(self._fn))
    ret = []
    for i, ele_fn in enumerate(self._fn):
        ret.append(ele_fn([ele[i] for ele in data]))
    return tuple(ret)
Batchify the input data.

Parameters
----------
data : list
    The samples to batchify. Each sample should contain N attributes.

Returns
-------
ret : tuple
    A tuple of length N. Contains the batchified result of each attribute in the input.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/batchify.py
Apache-2.0
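Tuple composes one batchify function per attribute of the sample. A minimal usage sketch, assuming the sibling Stack function stacks same-shape samples:

import numpy as np
from gluonnlp.data.batchify import Tuple, Pad, Stack

batchify_fn = Tuple(Pad(val=0), Stack())
tokens, labels = batchify_fn([(np.array([1, 2]), 0), (np.array([3]), 1)])
print(tokens.shape, labels.shape)  # (2, 2) (2,)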
def __call__(self, data: t_List[t_Dict]) -> t_Dict: """ Parameters ---------- data The samples to batchify. Each sample should be a dictionary Returns ------- ret The resulting dictionary that stores the merged samples. """ ret = dict() for k, ele_fn in self._fn_dict.items(): ret[k] = ele_fn([ele[k] for ele in data]) return ret
Parameters ---------- data The samples to batchify. Each sample should be a dictionary Returns ------- ret The resulting dictionary that stores the merged samples.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/batchify.py
Apache-2.0
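The Dict variant keys the per-field functions by name, as __call__ above shows. A minimal sketch under the same assumptions about Pad and Stack:

import numpy as np
from gluonnlp.data.batchify import Dict, Pad, Stack

batchify_fn = Dict({'tokens': Pad(val=0), 'label': Stack()})
batch = batchify_fn([{'tokens': np.array([1, 2]), 'label': 0},
                     {'tokens': np.array([3]), 'label': 1}])
print(batch['tokens'].shape, batch['label'].shape)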
def __call__(self, data: t_List[t_NamedTuple]) -> t_NamedTuple:
    """Batchify the input data.

    Parameters
    ----------
    data
        The samples to batchify. Each sample should be a namedtuple.

    Returns
    -------
    ret
        A namedtuple of length N. Contains the batchified result of each attribute
        in the input.
    """
    if not isinstance(data[0], self._container):
        raise ValueError('The samples should have the same type as the stored namedtuple.'
                         ' data[0]={}, container={}'.format(data[0], self._container))
    ret = []
    for i, ele_fn in enumerate(self._fn_l):
        ret.append(ele_fn([ele[i] for ele in data]))
    return self._container(*ret)
Batchify the input data.

Parameters
----------
data
    The samples to batchify. Each sample should be a namedtuple.

Returns
-------
ret
    A namedtuple of length N. Contains the batchified result of each attribute in the input.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/batchify.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/batchify.py
Apache-2.0
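For namedtuple samples the same pattern applies. The construction shown here (the namedtuple class plus one function per field) is an assumption; the __call__ above only requires that self._container holds the class and self._fn_l holds one function per field:

import collections
import numpy as np
from gluonnlp.data.batchify import NamedTuple, Pad, Stack

Sample = collections.namedtuple('Sample', ['tokens', 'label'])
batchify_fn = NamedTuple(Sample, {'tokens': Pad(val=0), 'label': Stack()})
batch = batchify_fn([Sample(np.array([1, 2]), 0), Sample(np.array([3]), 1)])
print(batch.tokens.shape, batch.label.shape)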
def _words_match_regex(words: List[str], ignore_case=False, replace_white_space=False) -> Pattern:
    """Obtain a regex that checks whether a given corpus contains any of the input words.

    Parameters
    ----------
    words
        The words to match.
    ignore_case
        Whether to ignore the case of the words.
    replace_white_space
        Whether to allow arbitrary whitespace between the parts of multi-word entries.

    Returns
    -------
    regex
        The compiled regular expression.
    """
    words = [ele for ele in words if ele]
    if ignore_case:
        flags = re.IGNORECASE
    else:
        flags = 0
    if replace_white_space:
        words = [ele.replace(' ', r'\s+') for ele in words]
    regex = re.compile('[^a-z]({words})[^a-z]|^({words})$|^({words})[^a-z]|[^a-z]({words})$'
                       .format(words='|'.join(words)), flags)
    return regex
Obtain a regex that checks whether a given corpus contains any of the input words.

Parameters
----------
words
    The words to match.
ignore_case
    Whether to ignore the case of the words.
replace_white_space
    Whether to allow arbitrary whitespace between the parts of multi-word entries.

Returns
-------
regex
    The compiled regular expression.
_words_match_regex
python
dmlc/gluon-nlp
src/gluonnlp/data/filtering.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/filtering.py
Apache-2.0
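The word-boundary pattern built above can be exercised standalone with the re module; this small demo is not part of the source:

import re

words = ['nlp', 'deep learning']
words = [w.replace(' ', r'\s+') for w in words]  # replace_white_space=True
pat = re.compile('[^a-z]({w})[^a-z]|^({w})$|^({w})[^a-z]|[^a-z]({w})$'
                 .format(w='|'.join(words)), re.IGNORECASE)
print(bool(pat.search('I love NLP!')))        # True: flanked by non-letters
print(bool(pat.search('gluonnlp is a lib')))  # False: "nlp" is inside a word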
def __call__(self, corpus: str):
    """

    Parameters
    ----------
    corpus
        Input corpus

    Returns
    -------
    lang_label
        The ISO 639-1 code of the predicted language
    score
        The score of the prediction
    """
    if self._use_fasttext:
        labels, scores = self._model.predict(corpus)
        label = labels[0].replace("__label__", "")
        return label, scores[0]
    else:
        return self._model.classify(corpus.lower())
Parameters
----------
corpus
    Input corpus

Returns
-------
lang_label
    The ISO 639-1 code of the predicted language
score
    The score of the prediction
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/filtering.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/filtering.py
Apache-2.0
def _dataset_worker_fn(urls, dataset_fn, batch_sampler_fn): """Function to generate datasets and batch sampler for each worker.""" global _manager, _dataset dataset = dataset_fn(urls) batch_sampler = batch_sampler_fn(dataset) if _manager: dataset = _manager.list(zip(*dataset._data)) _dataset = dataset return dataset, batch_sampler
Function to generate datasets and batch sampler for each worker.
_dataset_worker_fn
python
dmlc/gluon-nlp
src/gluonnlp/data/loading.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/loading.py
Apache-2.0
def _batch_worker_fn(samples, batchify_fn, dataset=None, counter=None):
    """Function for processing data in worker process."""
    # pylint: disable=unused-argument
    # Each worker process must fork a new MXIndexedRecordIO handle.
    # Preserving the dataset as a global variable saves tons of overhead and is safe
    # in the new process.
    if len(dataset[0]) > 1:
        if isinstance(samples[0], (list, tuple)):
            batch = [batchify_fn([dataset[i] for i in shard]) for shard in samples]
        else:
            batch = batchify_fn([dataset[i] for i in samples])
    else:
        if isinstance(samples[0], (list, tuple)):
            batch = [batchify_fn([dataset[i][0] for i in shard]) for shard in samples]
        else:
            batch = batchify_fn([dataset[i][0] for i in samples])
    buf = io.BytesIO()
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch)
    return buf.getvalue(), counter
Function for processing data in worker process.
_batch_worker_fn
python
dmlc/gluon-nlp
src/gluonnlp/data/loading.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/loading.py
Apache-2.0
def _push_next(self): """Assign next batch workload to workers.""" if self._batch_iter is not None: r = next(self._batch_iter, None) else: r = None if r is None: result = self._next_dataset() if result is None: return else: dataset, batch_sampler = result # Without checking the reference counts of previous datasets in the master process, # the key error can be triggered occasionally. This may be a bug in Python. self._count_dataset_ref(dataset) self._dataset = dataset # initialize reference counter if id(dataset) not in self._counter_ref: self._counter_ref[id(dataset)] = self._manager.Value('i', 0) self._batch_iter = iter(batch_sampler) self._push_next() else: counter = self._counter_ref[id(self._dataset)] counter.value += 1 async_ret = self._worker_pool.apply_async( self._worker_fn, (r, self._batchify_fn, self._dataset, counter)) self._data_buffer[self._sent_idx] = async_ret self._sent_idx += 1
Assign next batch workload to workers.
_push_next
python
dmlc/gluon-nlp
src/gluonnlp/data/loading.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/loading.py
Apache-2.0
def _push_next_dataset(self): """Assign next dataset workload to workers.""" current_dataset_idx = self._sent_idx * self._circle_length if current_dataset_idx < self._num_datasets: circle_length = min(self._circle_length, self._num_datasets - current_dataset_idx) urls = [self._dataset[current_dataset_idx + i] for i in range(circle_length)] else: return # push to worker asynchronously async_ret = self._worker_pool.apply_async( self._worker_fn, (urls, self._dataset_fn, self._batch_sampler_fn)) # data buffer stores the async result self._data_buffer[self._sent_idx] = async_ret self._sent_idx += 1
Assign next dataset workload to workers.
_push_next_dataset
python
dmlc/gluon-nlp
src/gluonnlp/data/loading.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/loading.py
Apache-2.0
def _next_dataset(self): """Retrieve the next dataset. Returns None if no dataset is available.""" if self._rcvd_idx == self._sent_idx: assert not self._data_buffer, 'Data buffer should be empty at this moment' return None assert self._rcvd_idx < self._sent_idx, \ 'rcvd_idx must be smaller than sent_idx' assert self._rcvd_idx in self._data_buffer, \ 'fatal error with _next_dataset, rcvd_idx missing' if len(self._cached_dataset) == 0 or self._data_buffer[self._rcvd_idx].ready(): ret = self._data_buffer.pop(self._rcvd_idx) dataset, batch_sampler = ret.get() self._rcvd_idx += 1 if self._cached and len(self._cached_dataset) < self._num_max_cached: self._cached_dataset.append((dataset, batch_sampler)) else: dataset, batch_sampler = self._cached_dataset.pop(0) return dataset, batch_sampler
Retrieve the next dataset. Returns None if no dataset is available.
_next_dataset
python
dmlc/gluon-nlp
src/gluonnlp/data/loading.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/loading.py
Apache-2.0
def __call__(self, max_lengths: Union[int, Sequence[int]], min_lengths: Union[int, Sequence[int]], num_buckets: int) -> List[int]: """Generate bucket keys based on the lengths of sequences and number of buckets. Parameters ---------- max_lengths Maximum of lengths of sequences. min_lengths Minimum of lengths of sequences. num_buckets Number of buckets Returns ------- bucket_keys A list including the keys of the buckets. """ raise NotImplementedError
Generate bucket keys based on the lengths of sequences and number of buckets. Parameters ---------- max_lengths Maximum of lengths of sequences. min_lengths Minimum of lengths of sequences. num_buckets Number of buckets Returns ------- bucket_keys A list including the keys of the buckets.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/sampler.py
Apache-2.0
def __call__(self, max_lengths: Union[int, Sequence[int]],
             min_lengths: Union[int, Sequence[int]],
             num_buckets: int) -> List[int]:
    r"""Generate bucket keys such that all buckets have the same width.

    Parameters
    ----------
    max_lengths
        Maximum of lengths of sequences.
    min_lengths
        Minimum of lengths of sequences.
    num_buckets
        Number of buckets

    Returns
    -------
    bucket_keys : list of int
        A list including the keys of the buckets.
    """
    if not isinstance(max_lengths, INT_TYPES):
        bucket_width_l = [max((1 + max_len - min_len) // num_buckets, 1)
                          for max_len, min_len in zip(max_lengths, min_lengths)]
        bucket_keys = \
            [tuple(max(max_len - i * width, min_len)
                   for max_len, min_len, width
                   in zip(max_lengths, min_lengths, bucket_width_l))
             for i in range(num_buckets)]
    else:
        bucket_width = max((1 + max_lengths - min_lengths) // num_buckets, 1)
        bucket_keys = [max(max_lengths - i * bucket_width, min_lengths)
                       for i in range(num_buckets)]
    return bucket_keys
Generate bucket keys such that all buckets have the same width.

Parameters
----------
max_lengths
    Maximum of lengths of sequences.
min_lengths
    Minimum of lengths of sequences.
num_buckets
    Number of buckets

Returns
-------
bucket_keys : list of int
    A list including the keys of the buckets.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/sampler.py
Apache-2.0
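A quick standalone check of the constant-width arithmetic above:

max_len, min_len, num_buckets = 100, 1, 5
width = max((1 + max_len - min_len) // num_buckets, 1)          # 20
keys = [max(max_len - i * width, min_len) for i in range(num_buckets)]
print(keys)  # [100, 80, 60, 40, 20]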
def __call__(self, max_lengths: Union[int, Sequence[int]],
             min_lengths: Union[int, Sequence[int]],
             num_buckets: int) -> List[int]:
    r"""This function generates bucket keys with linearly increasing bucket width:
    the width of the i-th bucket grows linearly with i.

    Parameters
    ----------
    max_lengths
        Maximum of lengths of sequences.
    min_lengths
        Minimum of lengths of sequences.
    num_buckets
        Number of buckets

    Returns
    -------
    bucket_keys
        A list including the keys of the buckets.
    """
    if not isinstance(max_lengths, INT_TYPES):
        alpha_l = [2 * float(max_len - min_len - num_buckets)
                   / (num_buckets * (num_buckets + 1))
                   for max_len, min_len in zip(max_lengths, min_lengths)]
        bucket_keys = \
            [tuple(int(round(min_len + alpha * (((i + 1) * (i + 2)) / 2) + i + 1))
                   for min_len, alpha in zip(min_lengths, alpha_l))
             for i in range(num_buckets)]
        bucket_keys[-1] = tuple(max(max_bucket_key, max_len)
                                for max_bucket_key, max_len
                                in zip(bucket_keys[-1], max_lengths))
    else:
        alpha = 2 * float(max_lengths - min_lengths - num_buckets) \
                / (num_buckets * (num_buckets + 1))
        bucket_keys = [int(round(min_lengths + alpha * (((i + 1) * (i + 2)) / 2) + i + 1))
                       for i in range(num_buckets)]
        bucket_keys[-1] = max(bucket_keys[-1], max_lengths)
    return bucket_keys
This function generates bucket keys with linearly increasing bucket width: the width of the i-th bucket grows linearly with i.

Parameters
----------
max_lengths
    Maximum of lengths of sequences.
min_lengths
    Minimum of lengths of sequences.
num_buckets
    Number of buckets

Returns
-------
bucket_keys
    A list including the keys of the buckets.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/sampler.py
Apache-2.0
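The same standalone check for the linear-width scheme; the gaps between consecutive keys grow linearly (14, 20, 26, 32 below):

max_len, min_len, n = 100, 1, 5
alpha = 2 * float(max_len - min_len - n) / (n * (n + 1))        # ~6.27
keys = [int(round(min_len + alpha * (((i + 1) * (i + 2)) / 2) + i + 1))
        for i in range(n)]
keys[-1] = max(keys[-1], max_len)
print(keys)  # [8, 22, 42, 68, 100]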
def __call__(self, max_lengths: Union[int, Sequence[int]],
             min_lengths: Union[int, Sequence[int]],
             num_buckets: int) -> List[int]:
    r"""This function generates bucket keys with exponentially increasing bucket width.

    Parameters
    ----------
    max_lengths
        Maximum of lengths of sequences.
    min_lengths
        Minimum of lengths of sequences.
    num_buckets
        Number of buckets

    Returns
    -------
    bucket_keys
        A list including the keys of the buckets.
    """
    if not isinstance(max_lengths, INT_TYPES):
        initial_width_l = [
            (max_len - min_len) * (self.bucket_len_step - 1)
            / (math.pow(self.bucket_len_step, num_buckets) - 1)
            for max_len, min_len in zip(max_lengths, min_lengths)]
        bucket_keys = \
            [tuple(
                int(round(min_len + initial_width
                          * (math.pow(self.bucket_len_step, i + 1) - 1)
                          / (self.bucket_len_step - 1)))
                for min_len, initial_width in zip(min_lengths, initial_width_l))
             for i in range(num_buckets)]
        bucket_keys[-1] = tuple(max(max_bucket_key, max_len)
                                for max_bucket_key, max_len
                                in zip(bucket_keys[-1], max_lengths))
    else:
        initial_width = (max_lengths - min_lengths) * (self.bucket_len_step - 1) \
                        / (math.pow(self.bucket_len_step, num_buckets) - 1)
        bucket_keys = [
            int(round(min_lengths + initial_width
                      * (math.pow(self.bucket_len_step, i + 1) - 1)
                      / (self.bucket_len_step - 1)))
            for i in range(num_buckets)]
        bucket_keys[-1] = max(bucket_keys[-1], max_lengths)
    return bucket_keys
This function generates bucket keys with exponentially increasing bucket width.

Parameters
----------
max_lengths
    Maximum of lengths of sequences.
min_lengths
    Minimum of lengths of sequences.
num_buckets
    Number of buckets

Returns
-------
bucket_keys
    A list including the keys of the buckets.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/sampler.py
Apache-2.0
def __repr__(self): """Return a string representing the statistics of the bucketing sampler. Returns ------- ret : str String representing the statistics of the buckets. """ ret = '{name}(\n' \ ' sample_num={sample_num}, batch_num={batch_num}\n' \ ' key={bucket_keys}\n' \ ' cnt={bucket_counts}\n' \ ' batch_size={bucket_batch_sizes}\n'\ ')'\ .format(name=self.__class__.__name__, sample_num=len(self._lengths), batch_num=len(self._batch_infos), bucket_keys=self._bucket_keys, bucket_counts=[len(sample_ids) for sample_ids in self._bucket_sample_ids], bucket_batch_sizes=self._bucket_batch_sizes) return ret
Return a string representing the statistics of the bucketing sampler. Returns ------- ret : str String representing the statistics of the buckets.
__repr__
python
dmlc/gluon-nlp
src/gluonnlp/data/sampler.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/sampler.py
Apache-2.0
def _check_special_token_identifier(key): """Raise error if the key is not valid as a key for the special token. Parameters ---------- key The identifier """ if not (key.endswith('_token') and key != '_token'): raise ValueError('Each key needs to have the form "name_token".' ' Received {}'.format(key))
Raise error if the key is not valid as a key for the special token. Parameters ---------- key The identifier
_check_special_token_identifier
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
def to_tokens(self, idx: Union[int, Tuple[int], List[int], np.ndarray])\
        -> Union[Hashable, List[Hashable]]:
    """Get the tokens corresponding to the chosen indices

    Parameters
    ----------
    idx
        The index used to select the tokens.

    Returns
    -------
    ret
        The tokens of these selected indices.
    """
    if isinstance(idx, (list, tuple)):
        return [self.all_tokens[i] for i in idx]
    elif isinstance(idx, np.ndarray):
        if idx.ndim == 0:
            return self.all_tokens[idx]
        elif idx.ndim == 1:
            return [self.all_tokens[i] for i in idx]
        else:
            raise ValueError('Unsupported numpy ndarray ndim={}'.format(idx.ndim))
    else:
        return self.all_tokens[idx]
Get the tokens corresponding to the chosen indices

Parameters
----------
idx
    The index used to select the tokens.

Returns
-------
ret
    The tokens of these selected indices.
to_tokens
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
def __getitem__(self, tokens: Union[Hashable, List[Hashable], Tuple[Hashable]])\ -> Union[int, List[int]]: """Looks up indices of text tokens according to the vocabulary. If `unknown_token` of the vocabulary is None, looking up unknown tokens results in KeyError. Parameters ---------- tokens A source token or tokens to be converted. Returns ------- ret A token index or a list of token indices according to the vocabulary. """ if isinstance(tokens, (list, tuple)): if self.has_unk: return [self._token_to_idx.get(token, self.unk_id) for token in tokens] else: return [self._token_to_idx[token] for token in tokens] else: if self.has_unk: return self._token_to_idx.get(tokens, self.unk_id) else: return self._token_to_idx[tokens]
Looks up indices of text tokens according to the vocabulary. If `unknown_token` of the vocabulary is None, looking up unknown tokens results in KeyError. Parameters ---------- tokens A source token or tokens to be converted. Returns ------- ret A token index or a list of token indices according to the vocabulary.
__getitem__
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
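A small round-trip between the two lookup directions above. This is a minimal sketch; the default special tokens the constructor installs (e.g. the unknown token) are an assumption here, while the tokens keyword matches the one used by from_json later in this file:

from gluonnlp.data import Vocab

vocab = Vocab(tokens=['hello', 'world'])
ids = vocab[['hello', 'world', 'out-of-vocab']]  # unknown maps to vocab.unk_id
print(ids)
print(vocab.to_tokens(ids))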
def __call__(self, tokens: Union[Hashable, List[Hashable], Tuple[Hashable]])\
        -> Union[int, List[int]]:
    """Looks up indices of text tokens according to the vocabulary.

    Parameters
    ----------
    tokens
        A source token or tokens to be converted.

    Returns
    -------
    ret
        A token index or a list of token indices according to the vocabulary.
    """
    return self[tokens]
Looks up indices of text tokens according to the vocabulary. Parameters ---------- tokens A source token or tokens to be converted. Returns ------- ret A token index or a list of token indices according to the vocabulary.
__call__
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
def to_json(self) -> str:
    """Serialize Vocab object into a json string.

    Returns
    -------
    ret
        The serialized json string
    """
    vocab_dict = dict()
    # Perform a sanity check to make sure that we are able to reconstruct the original vocab
    for i, tok in enumerate(self._all_tokens):
        if self._token_to_idx[tok] != i:
            warnings.warn('The vocabulary is corrupted! One possible reason is that the '
                          'tokens are changed manually without updating the '
                          '_token_to_idx map. Please check your code or report an issue '
                          'on GitHub!')
    vocab_dict['all_tokens'] = self._all_tokens
    vocab_dict['special_token_key_value'] = self._special_token_kv
    ret = json.dumps(vocab_dict, ensure_ascii=False)
    return ret
Serialize Vocab object into a json string. Returns ------- ret The serialized json string
to_json
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
def from_json(cls, json_str: Union[str, bytes, bytearray]) -> 'Vocab': """Deserialize Vocab object from json string. Parameters ---------- json_str Serialized json string of a Vocab object. Returns ------- vocab The constructed Vocab object """ vocab_dict = json.loads(json_str) all_tokens = vocab_dict.get('all_tokens') special_token_kv = vocab_dict.get('special_token_key_value') if 'unk_token' not in special_token_kv: special_token_kv['unk_token'] = None vocab = cls(tokens=all_tokens, **special_token_kv) return vocab
Deserialize Vocab object from json string. Parameters ---------- json_str Serialized json string of a Vocab object. Returns ------- vocab The constructed Vocab object
from_json
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
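The two methods above give a simple serialization round-trip; a minimal sketch:

from gluonnlp.data import Vocab

vocab = Vocab(tokens=['hello', 'world'])
restored = Vocab.from_json(vocab.to_json())
assert restored.all_tokens == vocab.all_tokens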
def load_vocab(vocab: Union[str, Vocab]) -> Vocab:
    """Quick helper function to load vocabulary from a file.

    Parameters
    ----------
    vocab
        A Vocab object or the path to the vocabulary file.

    Returns
    -------
    vocab
        The loaded Vocab object.
    """
    if isinstance(vocab, Vocab):
        return vocab
    elif isinstance(vocab, str):
        return Vocab.load(vocab)
    else:
        raise NotImplementedError('Type of the input vocab is not supported. '
                                  'We only support "str" or "Vocab". type(vocab) = "{}".'
                                  .format(type(vocab)))
Quick helper function to load vocabulary from a file.

Parameters
----------
vocab
    A Vocab object or the path to the vocabulary file.

Returns
-------
vocab
    The loaded Vocab object.
load_vocab
python
dmlc/gluon-nlp
src/gluonnlp/data/vocab.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/vocab.py
Apache-2.0
def get_token_type(tokens: Union[List[str], List[int], List[List[str]], List[List[int]]]) -> type:
    """

    Parameters
    ----------
    tokens
        The input tokens.

    Returns
    -------
    token_type
        If `tokens` is empty, return `str`. Otherwise, return `str` if the input is
        str and `int` if the input is int.
    """
    if len(tokens) == 0:
        return str
    if isinstance(tokens[0], int):
        return int
    elif isinstance(tokens[0], str):
        return str
    elif isinstance(tokens[0], list):
        flatten_tokens_it = itertools.chain.from_iterable(tokens)
        try:
            first_token = next(flatten_tokens_it)
            return type(first_token)
        except StopIteration:
            return str
    else:
        raise TokenTypeNotSupportedError(type(tokens[0]))
Parameters
----------
tokens
    The input tokens.

Returns
-------
token_type
    If `tokens` is empty, return `str`. Otherwise, return `str` if the input is str and `int` if the input is int.
get_token_type
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/base.py
Apache-2.0
def rebuild_offset_from_tokens(sentence: str, tokens: List[str]) \ -> List[Tuple[int, int]]: """Recover the offset of the tokens in the original sentence. If you are using a subword tokenizer, make sure to remove the prefix/postfix of the tokens before using this function. Also, this does not work for n-gram-based (n>1) subword tokenization, i.e. it works for "gluonnlp" --> ["gluon", "nlp"] but not for "gluonnlp" --> ["gl", "lu", "uo", "on", "nl", "lp"] Parameters ---------- sentence The input sentence tokens A list of strings that represent the tokenization result Returns ------- offsets A list of start+end pairs: [(start0, end0), (start1, end1), ...]. Each pair represents the start and end positions of the token in the original sentence. """ running_offset = 0 ret = [] for token in tokens: token_offset = sentence.index(token, running_offset) token_len = len(token) running_offset = token_offset + token_len ret.append((token_offset, running_offset)) return ret
Recover the offset of the tokens in the original sentence. If you are using a subword tokenizer, make sure to remove the prefix/postfix of the tokens before using this function. Also, this does not work for n-gram-based (n>1) subword tokenization, i.e. it works for "gluonnlp" --> ["gluon", "nlp"] but not for "gluonnlp" --> ["gl", "lu", "uo", "on", "nl", "lp"] Parameters ---------- sentence The input sentence tokens A list of strings that represent the tokenization result Returns ------- offsets A list of start+end pairs: [(start0, end0), (start1, end1), ...]. Each pair represents the start and end positions of the token in the original sentence.
rebuild_offset_from_tokens
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/base.py
Apache-2.0
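The offset-recovery loop above, run standalone on a simple whitespace tokenization:

sentence = 'gluon nlp toolkit'
tokens = ['gluon', 'nlp', 'toolkit']
running, offsets = 0, []
for tok in tokens:
    start = sentence.index(tok, running)   # search after the previous token
    running = start + len(tok)
    offsets.append((start, running))
print(offsets)  # [(0, 5), (6, 9), (10, 17)]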
def get_char_offset_from_byte_offset(sentence: str, byte_offsets: List[Tuple[int, int]]):
    """Get the character-level offsets based on the byte-level offsets

    Parameters
    ----------
    sentence
        The input sentence
    byte_offsets
        The byte-level offsets

    Returns
    -------
    char_offsets
        The character-level offsets
    """
    byte_offset_to_char_offset = {}
    byte_offset = 0
    for i, ele in enumerate(sentence):
        byte_offset_to_char_offset[byte_offset] = i
        byte_offset += len(ele.encode('utf-8'))
    # Handle the offset right past the last character
    byte_offset_to_char_offset[byte_offset] = i + 1
    ret = []
    for ele in byte_offsets:
        ret.append((byte_offset_to_char_offset[ele[0]],
                    byte_offset_to_char_offset[ele[1]]))
    return ret
Get the character-level offsets based on the byte-level offsets Parameters ---------- sentence The input sentence byte_offsets The byte-level offsets Returns ------- char_offsets The character-level offsets
get_char_offset_from_byte_offset
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/base.py
Apache-2.0
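The byte-to-character mapping above, demonstrated standalone on a string with a multi-byte character:

sentence = 'héllo'  # 'é' occupies two bytes in UTF-8
byte_to_char, byte_offset = {}, 0
for i, ch in enumerate(sentence):
    byte_to_char[byte_offset] = i
    byte_offset += len(ch.encode('utf-8'))
byte_to_char[byte_offset] = len(sentence)   # offset right past the last char
print(byte_to_char)  # {0: 0, 1: 1, 3: 2, 4: 3, 5: 4, 6: 5}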
def encode(self, sentences: SentencesType, output_type: type = str) \ -> Union[TokensType, TokenIDsType]: """Encode the input sentence(s) into multiple tokens. Parameters ---------- sentences The sentences to tokenize output_type The type of the output tokens. - str means each token is represented by its original text. - int means each token is represented by the index in the vocabulary. Returns ------- tokens The output tokens. """ pass
Encode the input sentence(s) into multiple tokens. Parameters ---------- sentences The sentences to tokenize output_type The type of the output tokens. - str means each token is represented by its original text. - int means each token is represented by the index in the vocabulary. Returns ------- tokens The output tokens.
encode
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/base.py
Apache-2.0
def encode_with_offsets(self, sentences: SentencesType,
                        output_type: type = str) \
        -> Tuple[Union[TokensType, TokenIDsType], TokenOffsetsType]:
    """Encode the input sentence(s) into multiple tokens. Different from encode, it
    will also return the character start and end positions of each token in the
    original text. Here, the default implementation is to use the tokenized result
    to recover the offsets.

    Parameters
    ----------
    sentences
        The sentence(s) to tokenize
    output_type
        The type of the output tokens.
        - `str` means each token is represented by its original text.
        - `int` means each token is represented by the index in the vocabulary.

    Returns
    -------
    tokens
        The output tokens.
    offsets
        The offsets of these tokens. Each encodes the start and end location in the
        original unicode string. We return the character-offset instead of the
        byte-offset.
    """
    raise NotImplementedError
Encode the input sentence(s) into multiple tokens. Different from encode, it will also return the character start and end positions of each token in the original text. Here, the default implementation is to use the tokenized result to recover the offsets.

Parameters
----------
sentences
    The sentence(s) to tokenize
output_type
    The type of the output tokens.
    - `str` means each token is represented by its original text.
    - `int` means each token is represented by the index in the vocabulary.

Returns
-------
tokens
    The output tokens.
offsets
    The offsets of these tokens. Each encodes the start and end location in the original unicode string. We return the character-offset instead of the byte-offset.
encode_with_offsets
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/base.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/base.py
Apache-2.0
def is_new_version_model_file(model_file_path: str) -> bool: """Check whether the model file belongs to the new version of HuggingFace Tokenizers, i.e., >= 0.8 Parameters ---------- model_file_path Path to the model file Returns ------- is_new_version Whether the model file is generated by the new version of huggingface tokenizer. """ with open(model_file_path, 'r', encoding='utf-8') as f: try: _ = json.load(f) return True except Exception: return False
Check whether the model file belongs to the new version of HuggingFace Tokenizers, i.e., >= 0.8 Parameters ---------- model_file_path Path to the model file Returns ------- is_new_version Whether the model file is generated by the new version of huggingface tokenizer.
is_new_version_model_file
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def hf_encode(model, sentences, output_type: type = str): """ Parameters ---------- model Model object in HuggingFace tokenizer sentences Input sentences output_type Output type Returns ------- ret """ is_multi_sentences = isinstance(sentences, list) if not is_multi_sentences: sentences = [sentences] encode_sentences = model.encode_batch(sentences, add_special_tokens=False) if output_type is str: ret = [encode_sentence.tokens for encode_sentence in encode_sentences] elif output_type is int: ret = [encode_sentence.ids for encode_sentence in encode_sentences] else: raise TokenTypeNotSupportedError(output_type) if is_multi_sentences: return ret else: return ret[0]
Parameters ---------- model Model object in HuggingFace tokenizer sentences Input sentences output_type Output type Returns ------- ret
hf_encode
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def is_last_subword(self, tokens):
    """Whether the sub-token is the last sub-token in a split token list.
    Only supports the case when the tokenizer is a HuggingFaceBPETokenizer

    Parameters
    ----------
    tokens
        A single token or a list of tokens

    Returns
    -------
    ret
        The results
    """
    assert self.model_type == 'BPEDecoder',\
        'Only supports BPE model. The model_type={}'.format(self.model_type)
    if isinstance(tokens, str):
        return tokens.endswith('</w>')
    elif isinstance(tokens, int):
        return tokens in self._last_subtoken_id_set
    elif isinstance(tokens, list):
        if len(tokens) == 0:
            return []
        if isinstance(tokens[0], str):
            return [ele.endswith('</w>') for ele in tokens]
        elif isinstance(tokens[0], int):
            return [ele in self._last_subtoken_id_set for ele in tokens]
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError
Whether the sub-token is the last sub-token in a split token list. Only supports the case when the tokenizer is a HuggingFaceBPETokenizer Parameters ---------- tokens A single token or a list of tokens Returns ------- ret The results
is_last_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def is_first_subword(self, tokens): """Whether the sub-token is the first sub-token in a token list. Only supports the case when the tokenizer is a HuggingFaceWordPieceTokenizer Parameters ---------- tokens A single token or a list of tokens Returns ------- ret The results """ assert self.model_type == 'WordPiece', \ 'Only supports WordPiece model. The model_type={}'.format(self.model_type) if isinstance(tokens, str): return not tokens.startswith('##') elif isinstance(tokens, int): return tokens in self._first_subtoken_id_set elif isinstance(tokens, list): if len(tokens) == 0: return [] if isinstance(tokens[0], str): return [not ele.startswith('##') for ele in tokens] elif isinstance(tokens[0], int): return [ele in self._first_subtoken_id_set for ele in tokens] else: raise NotImplementedError else: raise NotImplementedError
Whether the sub-token is the first sub-token in a token list. Only supports the case when the tokenizer is a HuggingFaceWordPieceTokenizer Parameters ---------- tokens A single token or a list of tokens Returns ------- ret The results
is_first_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def __init__(self, merges_file: Optional[str] = None, vocab_file: Optional[str] = None,
             unk_token: Optional[str] = Vocab.UNK_TOKEN, suffix: Optional[str] = '</w>',
             dropout: Optional[float] = None, lowercase: bool = False):
    """

    Parameters
    ----------
    merges_file
        The merges file saved by HuggingFace
    vocab_file
        Vocabulary file in GluonNLP
    unk_token
        The unknown token
    suffix
        The suffix for sub-tokens. For example, "Sunnyvale" will be "Sunny vale</w>"
    dropout
        Ratio of the BPE-Dropout
    lowercase
        Whether to lowercase the input before tokenization
    """
    super().__init__()
    self._merges_file = merges_file
    self._vocab_file = vocab_file
    self._unk_token = unk_token
    self._suffix = suffix
    self._dropout = dropout
    self._lowercase = lowercase
    self.__rebuild_tokenizer()
    self._last_subword_id_set = frozenset([self._vocab[ele]
                                           for ele in self._vocab.all_tokens
                                           if ele.endswith(self._suffix)])
Parameters
----------
merges_file
    The merges file saved by HuggingFace
vocab_file
    Vocabulary file in GluonNLP
unk_token
    The unknown token
suffix
    The suffix for sub-tokens. For example, "Sunnyvale" will be "Sunny vale</w>"
dropout
    Ratio of the BPE-Dropout
lowercase
    Whether to lowercase the input before tokenization
__init__
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def is_last_subword(self, tokens: Union[str, int, List[str], List[int]]) \ -> Union[bool, List[bool]]: """Whether the token is the last subword token. This can be used for whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the last subword token in the list of subwords. """ if isinstance(tokens, str): return tokens.endswith(self._suffix) elif isinstance(tokens, int): return tokens in self._last_subword_id_set elif isinstance(tokens, list): if len(tokens) == 0: return [] if isinstance(tokens[0], str): return [ele.endswith(self._suffix) for ele in tokens] elif isinstance(tokens[0], int): return [ele in self._last_subword_id_set for ele in tokens] else: raise NotImplementedError else: raise NotImplementedError
Whether the token is the last subword token. This can be used for whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the last subword token in the list of subwords.
is_last_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def is_first_subword(self, tokens: Union[str, int, List[str], List[int]]) \
        -> Union[bool, List[bool]]:
    """Whether the token is the first subword token in a sequence of subword tokens.
    This can be used for implementing whole-word masking.
    Special tokens are not treated specially here.

    Parameters
    ----------
    tokens
        The input tokens.

    Returns
    -------
    ret
        Whether the token is the first subword token in the list of subwords.
    """
    if isinstance(tokens, str):
        return not tokens.startswith(self._wordpieces_prefix)
    elif isinstance(tokens, int):
        return tokens in self._first_subword_id_set
    elif isinstance(tokens, list):
        if len(tokens) == 0:
            return []
        if isinstance(tokens[0], str):
            return [not ele.startswith(self._wordpieces_prefix) for ele in tokens]
        elif isinstance(tokens[0], int):
            return [ele in self._first_subword_id_set for ele in tokens]
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError
Whether the token is the first subword token in a sequence of subword tokens. This can be used for implementing whole-word masking. Special tokens are not treated specially here.

Parameters
----------
tokens
    The input tokens.

Returns
-------
ret
    Whether the token is the first subword token in the list of subwords.
is_first_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/huggingface.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/huggingface.py
Apache-2.0
def is_first_subword(self, tokens: Union[str, int, List[str], List[int]]) \ -> Union[bool, List[bool]]: """Whether the token is the first subword token. This can be used to implement whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the first subword token in the list of subwords """ if isinstance(tokens, str): return tokens.startswith(self._meta_symbol) elif isinstance(tokens, int): return tokens in self._first_subword_id_set elif isinstance(tokens, list): if len(tokens) == 0: return [] if isinstance(tokens[0], str): return [ele.startswith(self._meta_symbol) for ele in tokens] elif isinstance(tokens[0], int): return [ele in self._first_subword_id_set for ele in tokens] else: raise NotImplementedError else: raise NotImplementedError
Whether the token is the first subword token. This can be used to implement whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the first subword token in the list of subwords
is_first_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/sentencepiece.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/sentencepiece.py
Apache-2.0
def set_subword_regularization(self, nbest, alpha):
    """Set the subword-regularization parameters

    For more details, you may refer to the official SentencePiece library:
    https://github.com/google/sentencepiece

    Parameters
    ----------
    nbest
        The size of the n-best candidate segmentations to sample from.
        -1 means to sample from all candidate segmentations.
    alpha
        The smoothing (inverse temperature) parameter of the sampling distribution.
    """
    self._nbest = nbest
    self._alpha = alpha
Set the subword-regularization parameters

For more details, you may refer to the official SentencePiece library: https://github.com/google/sentencepiece

Parameters
----------
nbest
    The size of the n-best candidate segmentations to sample from. -1 means to sample from all candidate segmentations.
alpha
    The smoothing (inverse temperature) parameter of the sampling distribution.
set_subword_regularization
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/sentencepiece.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/sentencepiece.py
Apache-2.0
def __getstate__(self):
    """Make the SentencepieceTokenizer picklable. We will remove the _spt_cls and
    _sp_model, which are not picklable, and try to reconstruct the class via the saved
    model_path. This behavior is only acceptable for multiprocessing and should not be
    used to save sentencepiece models."""
    state = self.__dict__.copy()
    state['_spt_cls'] = None
    state['_sp_model'] = None
    return state
Make the SentencepieceTokenizer picklable. We will remove the _spt_cls and _sp_model, which are not picklable, and try to reconstruct the class via the saved model_path. This behavior is only acceptable for multiprocessing and should not be used to save sentencepiece models.
__getstate__
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/sentencepiece.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/sentencepiece.py
Apache-2.0
def transform_sentence(self, sentence):
    """Replace the separator in the encoded result with the suffix:
    a@@, b@@, c -> a, b, c</w>

    Parameters
    ----------
    sentence
        The list of encoded subword tokens.

    Returns
    -------
    new_sentence
        The token list with separators stripped and the suffix appended to
        word-final tokens.
    """
    return [word[:-2] if len(word) > 2 and word[-2:] == self._separator
            else word + self._suffix for word in sentence]
Replace the separator in the encoded result with the suffix:
a@@, b@@, c -> a, b, c</w>

Parameters
----------
sentence
    The list of encoded subword tokens.

Returns
-------
new_sentence
    The token list with separators stripped and the suffix appended to word-final tokens.
transform_sentence
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/subword_nmt.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/subword_nmt.py
Apache-2.0
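The suffix rewrite above, run standalone on a subword-nmt style token list:

separator, suffix = '@@', '</w>'
sentence = ['a@@', 'b@@', 'c']
new_sentence = [w[:-2] if len(w) > 2 and w[-2:] == separator else w + suffix
                for w in sentence]
print(new_sentence)  # ['a', 'b', 'c</w>']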
def is_last_subword(self, tokens: Union[str, int, List[str], List[int]]) \ -> Union[bool, List[bool]]: """Whether the token is the last subword token. This can be used for whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the last subword token in the list of subwords """ if isinstance(tokens, str): return not tokens.endswith(self._separator) elif isinstance(tokens, int): return tokens in self._last_subword_id_set elif isinstance(tokens, list): if len(tokens) == 0: return [] if isinstance(tokens[0], str): return [not ele.endswith(self._separator) for ele in tokens] elif isinstance(tokens[0], int): return [ele in self._last_subword_id_set for ele in tokens] else: raise NotImplementedError else: raise NotImplementedError
Whether the token is the last subword token. This can be used for whole-word masking. Parameters ---------- tokens The input tokens Returns ------- ret Whether the token is the last subword token in the list of subwords
is_last_subword
python
dmlc/gluon-nlp
src/gluonnlp/data/tokenizers/subword_nmt.py
https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/data/tokenizers/subword_nmt.py
Apache-2.0