Columns: text (stringlengths 1–1.02k), class_index (int64, 0–10.8k), source (stringlengths 85–188)
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] """ # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value,
10,524
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, layer_head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_weights = (atten_out[1],) else: attn_weights = ()
10,524
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
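For reference, the `(1 - attention_mask) * torch.finfo(hidden_states.dtype).min` expression above turns a 0/1 padding mask into an additive bias that is added to the attention scores before the softmax. A minimal standalone sketch of that conversion (helper name hypothetical):

```python
import torch

def to_additive_mask(attention_mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # hypothetical helper: 1 -> 0.0 (position is attended), 0 -> dtype minimum (blocked before softmax)
    return (1 - attention_mask.to(dtype)) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0, 0]])  # last two positions are padding
print(to_additive_mask(mask))           # attended positions -> 0, padded positions -> -3.4028e+38
```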
attention_output = atten_out[0] hidden = hidden_states + self.norm(attention_output) if use_cache: outputs = (hidden, atten_out[2]) # hidden, present, (attentions) else: outputs = (hidden,) # hidden, (attentions) return outputs + attn_weights
10,524
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
class GPTSanJapaneseBlock(nn.Module): """ Self Attention and FFN Unit """ def __init__(self, config, ext_layer=False): super().__init__() self.self_attn = GPTSanJapaneseLayerSelfAttention(config) self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, output_router_tuple: Optional[bool] = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: r""" GPTSAN transformer block.
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
- 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**.
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`): Whether or not to output attention probabilities. output_router_tuple (`bool`): Whether or not to output the router logits and expert indices. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] """ atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attention_output = atten_out[0]
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF): sparse_out = self.feed_forward(attention_output, output_router_tuple) if output_router_tuple: hidden, router_tuple = sparse_out else: hidden = sparse_out else: hidden = self.feed_forward(attention_output) outputs = (hidden,) + atten_out[1:] if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple: outputs += (router_tuple,) return outputs
10,525
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
class GPTSanJapanesePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTSanJapaneseConfig base_model_prefix = "gptsan_japanese" supports_gradient_checkpointing = False _no_split_modules = ["GPTSanJapaneseBlock"] _skip_keys_device_placement = "past_key_values" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, nn.LayerNorm): module.weight.data.fill_(factor * 1.0) module.bias.data.zero_() elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseModel): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None: module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseDenseActDense): # Mesh TensorFlow FF initialization
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, GPTSanJapaneseAttention): # Multi-headed attention d_model = self.config.d_model key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) elif isinstance(module, GPTSanJapaneseSparseMLP): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " "See T5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
10,526
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
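`_shift_right` follows the usual T5-style label shifting: prepend `decoder_start_token_id`, drop the last position, and map the `-100` ignore index to `pad_token_id`. A small self-contained sketch of the same steps on toy labels (the token ids here are made up):

```python
import torch

def shift_right(labels: torch.Tensor, decoder_start_token_id: int, pad_token_id: int) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()           # move everything one step to the right
    shifted[..., 0] = decoder_start_token_id              # first position becomes the start token
    shifted.masked_fill_(shifted == -100, pad_token_id)   # ignore-index -> pad token
    return shifted

labels = torch.tensor([[11, 12, 13, -100]])
print(shift_right(labels, decoder_start_token_id=0, pad_token_id=1))  # -> [[0, 11, 12, 13]]
```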
class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel): def __init__(self, config: GPTSanJapaneseConfig): super().__init__(config) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) self.config = copy.deepcopy(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.last_project = nn.Linear(config.d_model, config.d_model, bias=True) self.act = ACT2FN["swish"] self.blocks = torch.nn.ModuleList([]) for _ in range(config.num_switch_layers): self.blocks.append(GPTSanJapaneseBlock(config)) for _ in range(config.num_ext_layers): self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True)) if config.num_ext_layers > 0: self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if config.d_spout: spouts = [] for _ in range(8): spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False)) spouts.append(nn.Tanh()) spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False)) self.spout = nn.Sequential(*spouts) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, output_router_logits: Optional[bool] = None, num_precontext: Optional[torch.LongTensor] = None, ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]: r"""
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
num_precontext (`torch.LongTensor` of shape `(batch_size, 1)`): Length of the `hybrid` (prefix) part of the input. Tokens up to this length attend to both preceding and following tokens, like BERT; tokens after it attend only to preceding tokens, like GPT. See also: https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
Returns: `MoEModelOutputWithPastAndCrossAttentions` or `tuple`. If `return_dict` is `True`, a `MoEModelOutputWithPastAndCrossAttentions` is returned instead of a tuple """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict device = self.position_embeddings.weight.device if input_ids is None: input_ids = torch.zeros([1, 1]).int().to(device) # dummy when input_ids is None if inputs_embeds is not None: raise NotImplementedError( "GPTSanJapaneseModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead." ) num_pasts_contexts = 0 num_batch = input_ids.shape[0] pasts_or_spout_value = None if past_key_values is not None: num_pasts_contexts = past_key_values[0][0].shape[2] elif self.config.d_spout and spout is not None: # `spout` is a special input vector specific to GPTSAN
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
# This controls the output by projecting embedded information such as the class of sentences during learning. # It should be passed instead of the first past_key_value. # See the original GPTSAN repository for details num_pasts_contexts += 1
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
# If there is an attention_mask, extend it by one position at the front for the spout if self.config.d_spout and spout is not None and attention_mask is not None: attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device) attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout attention_mask = attention_mask_with_spout # update attention_mask
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
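The spout handling above simply keeps a leading `1` in the extended mask so the spout's pseudo "past" position is always attended to. A tiny sketch of that tensor manipulation (the mask values are illustrative):

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]], dtype=torch.float)  # left-padded batch of one
with_spout = torch.ones(attention_mask.shape[0], attention_mask.shape[1] + 1)
with_spout[:, 1:] -= 1 - attention_mask   # keep a 1 in front for the spout slot, copy the rest
print(with_spout)                         # -> [[1., 0., 0., 1., 1., 1.]]
```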
if num_precontext is not None: # `num_precontext` is the number of tokens that refer to each other in prefix-lm # created per batch, so dimension of num_precontext should be [batch, 1] if not ( len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1 ): # num_precontext Should be [batch,1] raise ValueError("num_precontext should be [batch, 1] size.") num_precontext = torch.reshape(num_precontext, [-1]) else: num_precontext = torch.zeros([num_batch]).int().to(device) num_input_contexts = input_ids.shape[1] num_output_contexts = num_input_contexts + num_pasts_contexts hidden_states = self.embed_tokens(input_ids)
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if past_key_values is not None: pasts_or_spout_value = past_key_values elif self.config.d_spout and spout is not None: # Make vector from `spout` of GPTSAN to the same shape as past_key_values pasts_or_spout_value = self.spout(spout) # projecting `spout` vector pasts_or_spout_value = torch.reshape( pasts_or_spout_value, [ num_batch, self.config.num_layers, 2, self.config.num_heads, num_pasts_contexts, self.config.d_model // self.config.num_heads, ], ) pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1) # make same shape as past_key_values pasts_or_spout_value = tuple( tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
) else: pasts_or_spout_value = [None] * self.config.num_layers
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
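The reshaping above turns the flat spout projection into one `(key, value)` pair per layer, shaped like a cache entry. A sketch with small, made-up dimensions showing the same reshape/split sequence:

```python
import torch

# hypothetical small dimensions for illustration
num_batch, num_layers, num_heads, d_model, num_pasts = 2, 3, 4, 16, 1

projected = torch.randn(num_batch, num_layers * 2 * d_model)   # stand-in for the spout MLP output
reshaped = projected.reshape(
    num_batch, num_layers, 2, num_heads, num_pasts, d_model // num_heads
)
per_layer = torch.split(reshaped, [1] * num_layers, dim=1)
past_key_values = tuple(
    tuple(b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1))
    for a in per_layer
)
print(len(past_key_values), past_key_values[0][0].shape)
# -> 3 torch.Size([2, 4, 1, 4]): one (key, value) pair per layer, shaped like a cache entry
```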
# Token position considering spout and pasts token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts if attention_mask is None: attention_mask = torch.ones(num_batch, num_input_contexts, device=device) # positions for get position_embeddings gather_position = ( ( torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device) + token_position.unsqueeze(0) ) .transpose(1, 2) .long() ) # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2) gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
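`gather_position` subtracts the index of the first non-padding token so that left-padded rows still get position ids that start at the first real token. A simplified sketch of that shift, ignoring the past/spout offset and the `d_model` broadcast used above:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])         # two left-padding tokens
token_position = torch.arange(attention_mask.shape[1])   # [0, 1, 2, 3, 4]

# index of the first real (non-padding) token per batch row
first_real = (1 - attention_mask).argmin(dim=-1)          # -> [2]
shifted = (token_position.unsqueeze(0) - first_real.unsqueeze(1)).clamp(min=0)
print(shifted)                                            # -> [[0, 0, 0, 1, 2]]
```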
# attention_mask is applied per batch for i in range(num_batch): hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i]) # Create a mask used to make the prefix input length of the Prefix-LM variable causal_mask = ( torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8)) .view(1, 1, num_output_contexts, num_output_contexts) .to(device) ) prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :] if token_type_ids is not None: token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2) prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float() # Merge prefix_lm_mask and attention_mask extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
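The prefix-LM mask combines a causal (lower-triangular) mask with `token_type_ids`, so prefix tokens attend bidirectionally within the prefix while the remaining tokens stay causal. A minimal sketch on a 5-token example with no padding and no cached past:

```python
import torch

seq_len = 5
token_type_ids = torch.tensor([[1, 1, 1, 0, 0]])   # first 3 tokens form the prefix part

causal = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.uint8)).view(1, 1, seq_len, seq_len)
prefix_lm_mask = ((causal + token_type_ids.unsqueeze(1).unsqueeze(2)) > 0).float()
print(prefix_lm_mask[0, 0])
# rows 0-2 (prefix) see all prefix tokens; rows 3-4 fall back to causal attention:
# [[1, 1, 1, 0, 0],
#  [1, 1, 1, 0, 0],
#  [1, 1, 1, 0, 0],
#  [1, 1, 1, 1, 0],
#  [1, 1, 1, 1, 1]]
```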
# Prepare head mask if needed if head_mask is not None: head_mask = self.get_head_mask( head_mask, self.config.num_switch_layers + self.config.num_ext_layers ) # n_layer x batch x n_heads x N x N # outputs present_key_value_states = () if self.config.use_cache or use_cache else None all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None all_attentions = () if self.config.output_attentions or output_attentions else None all_router_probs = () if self.config.output_router_logits or output_router_logits else None
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
for layer, past in enumerate(pasts_or_spout_value): if layer == self.config.num_switch_layers: if self.config.num_ext_layers > 0: # extra_position_embeddings are extra position embeddings that are only created when extending the model with code from the original GPTSAN repository. Not used in the default model. # However, it is created when you create an additional layer and partially train only that location. # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading models created in the original GPTSAN repository. for i in range(num_batch): hidden_states[i] += torch.gather( self.extra_position_embeddings.weight, dim=0, index=gather_position[i] )
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
output_router_tuple = ( self.config.output_router_logits or output_router_logits ) and layer < self.config.num_switch_layers block_output = self.blocks[layer]( hidden_states=hidden_states, past_key_value=past, attention_mask=extended_attention_mask, head_mask=head_mask, use_cache=self.config.use_cache or use_cache, output_attentions=self.config.output_attentions or output_attentions, output_router_tuple=output_router_tuple, )
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
outpos = 0 hidden_states = block_output[outpos] if self.config.output_hidden_states or output_hidden_states: all_hidden_states += (hidden_states,) if self.config.use_cache or use_cache: outpos += 1 present = block_output[outpos] present_key_value_states += (present,) if self.config.output_attentions or output_attentions: outpos += 1 attention_probs = block_output[outpos] all_attentions += (attention_probs,) if output_router_tuple: outpos += 1 router_tuple = block_output[outpos] all_router_probs += (router_tuple[0],) hidden_states = self.last_project(hidden_states) hidden_states = self.act(hidden_states) if self.config.output_hidden_states or output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, router_probs=all_router_probs, )
10,527
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: GPTSanJapaneseConfig): super().__init__(config) self.model = GPTSanJapaneseModel(config) self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size])) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) if not self.config.torchscript: self.lm_head.weight = self.model.embed_tokens.weight
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, output_router_logits: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]: r"""
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
Returns: `MoECausalLMOutputWithPast` or `tuple`. If `return_dict` is `True`, a `MoECausalLMOutputWithPast` is returned instead of a tuple Example: Text Generation with regular LM Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils >>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("織田信長は、", return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> gen_token = model.generate(input_ids, max_new_tokens=50) >>> tokenizer.decode(gen_token[0]) "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..." ``` Text Generation with Prefix-LM Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
>>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> token_type_ids = x_token.token_type_ids.to(device) >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) >>> tokenizer.decode(gen_token[0]) "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..." ``` Simultaneously Text Generation And Masked Language Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
>>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。" >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> token_type_ids = x_token.token_type_ids.to(device) >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1) >>> tokenizer.decode(out_mlm_token[0]) "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
>>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :]) "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..." ```""" SEG_TOKEN = self.config.separator_token_id use_cache = use_cache or self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_return_dict = True num_precontext = None if input_ids is not None: num_batch = input_ids.shape[0] num_precontext = torch.zeros([num_batch]).int().to(input_ids.device) where_separators = torch.where(input_ids == SEG_TOKEN) num_precontext[where_separators[0]] += where_separators[1] num_precontext = num_precontext.unsqueeze(1)
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
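`num_precontext` is recovered from the position of the separator token in each row, i.e. the prefix length per batch entry. A small sketch of that computation (token ids other than the separator are made up; one separator per row is assumed, which is what the tokenizer produces):

```python
import torch

SEG_TOKEN = 35998                                   # separator token id from the config
input_ids = torch.tensor([[35993, 10, 11, SEG_TOKEN, 12, 13],
                          [35993, SEG_TOKEN, 20, 21, 22, 23]])

num_precontext = torch.zeros(input_ids.shape[0], dtype=torch.long)
rows, cols = torch.where(input_ids == SEG_TOKEN)
num_precontext[rows] += cols                        # position of the separator = prefix length
print(num_precontext.unsqueeze(1))                  # -> [[3], [1]]
```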
outputs = self.model( input_ids, attention_mask, token_type_ids, spout, past_key_values, head_mask, use_cache, inputs_embeds, decoder_inputs_embeds, output_attentions, output_hidden_states, model_return_dict, output_router_logits, num_precontext, ) lm_logits = self.lm_head(outputs[0]) if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]: lm_logits = lm_logits + self.final_logits_bias loss = None z_loss = None router_probs = None aux_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
if output_router_logits: # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs) z_loss = router_z_loss_func(router_logits) router_probs = nn.Softmax(dim=-1)(router_logits) aux_loss = load_balancing_loss_func(router_probs, expert_indexes) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: return tuple( v for v in [ loss, lm_logits, outputs.past_key_values, outputs.hidden_states, outputs.router_probs, z_loss, aux_loss, ] if v is not None )
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
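`router_z_loss_func` and `load_balancing_loss_func` come from the Switch Transformers modeling code and are not shown in this file. As a rough, simplified sketch only (not the library's exact implementation), the two auxiliary losses follow the Switch Transformers paper: the z-loss penalizes large router logits, and the load-balancing loss pushes tokens to be spread evenly across experts:

```python
import torch

def router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
    # paper-style z-loss: mean over tokens of (logsumexp over experts)^2
    return (torch.logsumexp(router_logits, dim=-1) ** 2).mean()

def load_balancing_loss(router_probs: torch.Tensor, expert_indexes: torch.Tensor) -> torch.Tensor:
    # paper-style balancing loss: num_experts * sum_i f_i * P_i, where f_i is the fraction of
    # tokens routed to expert i and P_i is the mean router probability assigned to expert i
    num_experts = router_probs.shape[-1]
    expert_mask = torch.nn.functional.one_hot(expert_indexes, num_experts).float()
    tokens_per_expert = expert_mask.mean(dim=-2)    # f_i, per group
    prob_per_expert = router_probs.mean(dim=-2)     # P_i, per group
    return (tokens_per_expert * prob_per_expert).sum(dim=-1).mean() * num_experts

logits = torch.randn(2, 8, 4)                       # (groups, tokens, experts)
probs = torch.softmax(logits, dim=-1)
top1 = probs.argmax(dim=-1)                         # top-1 expert per token
print(router_z_loss(logits).item(), load_balancing_loss(probs, top1).item())
```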
return MoECausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_probs, z_loss=z_loss, aux_loss=aux_loss, )
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[Union[List, torch.FloatTensor]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, **kwargs, ): if isinstance(spout, list): spout = torch.tensor(spout).float() if input_ids is not None: spout = spout.to(input_ids.device) if past_key_values is not None: return { "input_ids": input_ids[:, -1:] if input_ids is not None else None, "attention_mask": attention_mask, "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None, "spout": spout, "past_key_values": past_key_values, } return { "input_ids": input_ids, "attention_mask": attention_mask,
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
"token_type_ids": token_type_ids, "spout": spout, "past_key_values": None, }
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_input_embeddings(self): return self.model.get_input_embeddings()
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if len(router_output[0].shape) > 1: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
10,528
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
class GPTSanJapaneseTokenizer(PreTrainedTokenizer): """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling - Return token_type_ids for Prefix-LM model The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when decoding In addition, the original Japanese special Sub-Word-Encoding has been released in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a sentence of the prefix part and the part after it as a text pair of batch input. Example: ```python >>> from transformers import GPTSanJapaneseTokenizer
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> # You can confirm both 慶応 and 慶應 are encoded to 17750 >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"] [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] >>> # Both 慶応 and 慶應 are decoded to 慶応 >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]) '吾輩は猫である🐯。実は慶応(慶応)大学出身' ``` Example for Prefix-LM: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"] [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
>>> # Mask for Prefix-LM inputs >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"] [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ``` Example for batch encode: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"] [[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]] >>> # Mask for Prefix-LM inputs >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"] [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] >>> # Mask for padding >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"] [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ```
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
Args: vocab_file (`str`): File containing the vocabulary. emoji_file (`str`): File containing the emoji. unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`): The token used for an unknown character. pad_token (`str`, *optional*, defaults to `"<|separator|>"`): The token used for padding. bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`): A special token that separates the prefix part from the general input part. do_clean_text (`bool`, *optional*, defaults to `False`): Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE. """
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def __init__( self, vocab_file, emoji_file, unk_token="<|nottoken|>", pad_token="<|separator|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", sep_token="<|segmenter|>", do_clean_text=False, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(emoji_file): raise ValueError( f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google" " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.do_clean_text = do_clean_text
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file) self.subword_tokenizer = SubWordJapaneseTokenizer( vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
super().__init__( unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs, ) @property def vocab_size(self): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab) def get_vocab(self): return dict(self.raw_vocab, **self.added_tokens_encoder) def _tokenize(self, text): return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.subword_tokenizer.convert_id_to_token(index)
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" words = [] byte_tokens = [] for word in tokens: if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) byte_tokens = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word]) elif word == "<SP>": words.append(" ") elif word == "<BR>": words.append("\n") elif word == "<TAB>": words.append("\t") elif word == "<BLOCK>": words.append("▀") elif word == "<KIGOU>": words.append("ǀ")
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
elif word == "<U2000U2BFF>": words.append("‖") elif word == "<|bagoftoken|>": if len(words) > 0: words.append(words[-1]) words.append(words[-1]) words.append(words[-1]) elif word.startswith("<|") and word.endswith("|>"): words.append("") else: words.append(word) if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) text = "".join(words) return text
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
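`convert_tokens_to_string` buffers consecutive `<|byteNNN|>` tokens and decodes them as UTF-8, and expands `<|bagoftoken|>` into three repetitions of the previous word. A simplified, self-contained sketch of just those two behaviors (emoji handling and the other special markers are omitted):

```python
def decode_special_tokens(tokens):
    # simplified sketch of the byte-token buffering and <|bagoftoken|> expansion
    words, byte_buffer = [], []
    for tok in tokens:
        if tok.startswith("<|byte") and tok.endswith("|>"):
            byte_buffer.append(int(tok[6:-2]))
            continue
        if byte_buffer:
            words.append(bytearray(byte_buffer).decode("utf-8", errors="replace"))
            byte_buffer = []
        if tok == "<|bagoftoken|>":
            if words:
                words.extend([words[-1]] * 3)    # repeat the previous word three more times
        elif tok == "<SP>":
            words.append(" ")
        else:
            words.append(tok)
    if byte_buffer:
        words.append(bytearray(byte_buffer).decode("utf-8", errors="replace"))
    return "".join(words)

# "猫" is the UTF-8 byte sequence 231, 140, 171
print(decode_special_tokens(["<|byte231|>", "<|byte140|>", "<|byte171|>", "<SP>", "わ", "<|bagoftoken|>"]))
# -> '猫 わわわわ'
```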
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: vocab_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(vocab_file, "w", encoding="utf-8") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index:
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(",".join(token) + "\n") index += 1 with open(emoji_file, "w", encoding="utf-8") as writer: json.dump(self.emoji, writer) return vocab_file, emoji_file
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: # docstyle-ignore """ The tokenizer returns token_type_ids as separators between the Prefix part and the rest. token_type_ids is 1 for the Prefix part and 0 for the rest of the token. Example: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("アイウエ") >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ | >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 | >>> x_token = tokenizer("", prefix_text="アイウエ") >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG | >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
>>> x_token = tokenizer("ウエ", prefix_text="アイ") >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ | >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 | ```""" prefix_len = 0 if self.sep_token in self.vocab: segid = self.vocab[self.sep_token] if segid in token_ids_0: prefix_len = token_ids_0.index(segid) if token_ids_1 is None: total_len = len(token_ids_0) else: total_len = len(token_ids_0 + token_ids_1) return prefix_len * [1] + (total_len - prefix_len) * [0]
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs): # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation. # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest. if add_sep_token is None: add_sep_token = self.sep_token not in text # add SEP unless the un-prefix position was inserted explicitly prepared = self.bos_token if self.bos_token in self.vocab else "" prepared += prefix_text if prefix_text is not None else "" if add_sep_token: prepared += self.sep_token if self.sep_token in self.vocab else "" prepared += text return (prepared, kwargs)
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding:
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
# This tokenizer converts input text pairs into Prefix input and subsequent input if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list): # Merge each pair into a single text with an explicit un-prefix (SEP) position batch_prefix_texts = [] for pref, txt in batch_text_or_text_pairs: batch_prefix_texts.append(pref + self.sep_token + txt) batch_text_or_text_pairs = batch_prefix_texts
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
return super()._batch_encode_plus( batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs, )
10,529
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
class SubWordJapaneseTokenizer: """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT License according to the original repository. MIT License Copyright (c) 2020 tanreinama Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def __init__(self, vocab, ids_to_tokens, emoji): self.vocab = vocab # same as swe self.ids_to_tokens = ids_to_tokens # same as bpe self.emoji = emoji self.maxlen = np.max([len(w) for w in self.vocab.keys()]) self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") self.content_repatter4 = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) self.content_repatter5 = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
) self.content_repatter6 = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def __len__(self): return len(self.ids_to_tokens) def clean_text(self, content): content = self.content_repatter1.sub("<URL>", content) content = self.content_repatter2.sub("<EMAIL>", content) content = self.content_repatter3.sub("<TEL>", content) content = self.content_repatter4.sub("<DATE>", content) content = self.content_repatter5.sub("<DATE>", content) content = self.content_repatter6.sub("<PRICE>", content) content = content.translate(self.content_trans1) while "<BLOCK><BLOCK>" in content: content = content.replace("<BLOCK><BLOCK>", "<BLOCK>") return content
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def tokenize(self, text, clean=False): text = text.replace(" ", "<SP>") text = text.replace(" ", "<SP>") text = text.replace("\r\n", "<BR>") text = text.replace("\n", "<BR>") text = text.replace("\r", "<BR>") text = text.replace("\t", "<TAB>") text = text.replace("—", "ー") text = text.replace("−", "ー") for k, v in self.emoji["emoji"].items(): if k in text: text = text.replace(k, v) if clean: text = self.clean_text(text) def check_simbol(x): e = x.encode() if len(x) == 1 and len(e) == 2: c = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0xC2A1 and c <= 0xC2BF) or (c >= 0xC780 and c <= 0xC783) or (c >= 0xCAB9 and c <= 0xCBBF) or (c >= 0xCC80 and c <= 0xCDA2) ): return True return False
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
def checku2e(x): e = x.encode() if len(x) == 1 and len(e) == 3: c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 0xE28080 and c <= 0xE2B07F: return True return False
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
pos = 0 result = [] while pos < len(text): end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 candidates = [] # (token_id, token, pos) for e in range(end, pos, -1): wd = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(wd) > 2: candidates = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(candidates) > 0: # the smallest token_id is adopted _, wd, e = sorted(candidates, key=lambda x: x[0])[0] result.append(wd) pos = e else: end = pos + 1 wd = text[pos:end] if check_simbol(wd): result.append("<KIGOU>") elif checku2e(wd):
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
result.append("<U2000U2BFF>") else: for i in wd.encode("utf-8"): result.append("<|byte%d|>" % i) pos = end return result
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
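The tokenize loop above gathers every vocabulary hit starting at the current position and, among the candidates, adopts the entry with the smallest token id rather than simply the longest match. A simplified sketch of that candidate loop on a toy vocabulary (the real fallback emits `<|byte..|>` / `<KIGOU>` tokens instead of raw characters):

```python
def greedy_match(text, vocab, maxlen):
    # simplified sketch: spans starting at `pos` are tried longest-first, every vocabulary
    # hit becomes a candidate, and the candidate with the smallest token id is adopted
    pos, result = 0, []
    while pos < len(text):
        end = min(len(text), pos + maxlen + 1) if text[pos] == "<" else pos + 3
        candidates = []                              # (token_id, token, end_pos)
        for e in range(end, pos, -1):
            wd = text[pos:e]
            if wd in vocab:
                candidates.append((vocab[wd], wd, e))
        if candidates:
            _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
            result.append(wd)
            pos = e
        else:
            result.append(text[pos])                 # fallback (the real code emits byte tokens here)
            pos += 1
    return result

toy_vocab = {"東京": 30, "東": 5, "京都": 10, "都": 40}
print(greedy_match("東京都", toy_vocab, maxlen=2))   # -> ['東', '京都']: 東 (id 5) wins over the longer 東京 (id 30)
```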
def convert_id_to_token(self, index): return self.ids_to_tokens[index][0]
10,530
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
class GPTSanJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
Arguments: vocab_size (`int`, *optional*, defaults to 36000): Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTSanJapaneseModel`]. max_position_embeddings (`int`, *optional*, defaults to 1280): The maximum sequence length that this model might ever be used with. Defaults set this to 1280. d_model (`int`, *optional*, defaults to 1024): Size of the encoder layers and the pooler layer. d_ff (`int`, *optional*, defaults to 8192): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. d_ext (`int`, *optional*, defaults to 4096): Size of the intermediate feed forward layer in each Extra-layers. d_spout (`int`, *optional*, defaults to 128): Size of the `spout` vector. num_switch_layers (`int`, *optional*, defaults to 10):
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
Number of layers in the Switch Transformer layer. num_ext_layers (`int`, *optional*, defaults to 0): Number of layers in the Extra-layers. num_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 16): Number of experts for each SwitchTransformer layer. expert_capacity (`int`, *optional*, defaults to 128): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. dropout_rate (`float`, *optional*, defaults to 0.0): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router.
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
        router_jitter_noise (`float`, *optional*, defaults to 0.0):
            Amount of noise to add to the router. Set it to 0.0 during prediction, or to a small value (usually 1e-2)
            during training.
        router_dtype (`str`, *optional*, defaults to `"float32"`):
            The `dtype` used for the routers. It is preferable to keep the `dtype` as `"float32"`, as specified in the
            *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
        router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
            Whether to ignore padding tokens when routing.
        output_hidden_states (`bool`, *optional*, defaults to `False`):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not to return the attentions tensors of all attention layers.
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
        initializer_factor (`float`, *optional*, defaults to 0.002):
            A factor for initializing all weight matrices.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not to return the router logits of all experts.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
model_type = "gptsan-japanese" keys_to_ignore_at_inference = [ "past_key_values", ] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", }
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
10,531
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
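Apart from deriving `num_layers = num_switch_layers + num_ext_layers`, the constructor simply stores its arguments, so the configuration can be driven entirely through keyword overrides. A minimal usage sketch, assuming a `transformers` installation that still ships this deprecated model:

from transformers import GPTSanJapaneseConfig

# Default configuration (similar to Tanrei/GPTSAN-japanese).
config = GPTSanJapaneseConfig()

# Override a few fields; num_layers is derived, not passed directly.
small_config = GPTSanJapaneseConfig(num_switch_layers=4, num_ext_layers=2, d_model=512)
print(small_config.num_layers)   # 6
print(small_config.hidden_size)  # 512, resolved through the "hidden_size" -> "d_model" attribute_map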
class XGLMTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" XGLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
    [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
            The token used for padding, for example when batching sequences of different lengths.
        additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
            Additional special tokens used by the tokenizer.
    """
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = XGLMTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        **kwargs,
    ):
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            **kwargs,
        )

        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An XLM-RoBERTa sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0

        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa
        does not make use of token type ids, therefore a list of zeros is returned.
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
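Note that, although the docstring (carried over from XLM-RoBERTa) describes a `<s> X </s>` format, the implementation above only prepends the separator token and never appends one. A small arithmetic sketch of the layouts it actually produces, using hypothetical token ids:

# Hypothetical ids for illustration only.
sep_token_id = 2
token_ids_0 = [100, 101]
token_ids_1 = [200, 201]

sep = [sep_token_id]
single = sep + token_ids_0                           # [2, 100, 101]
pair = sep + token_ids_0 + sep + sep + token_ids_1   # [2, 100, 101, 2, 2, 200, 201]
token_type_ids = len(pair) * [0]                     # all zeros: XGLM does not use token type ids
print(single, pair, token_type_ids)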
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
10,532
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm_fast.py
class FlaxXGLMAttention(nn.Module):
    config: XGLMConfig
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} "
                f"and `num_heads`: {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()
        self.dropout_layer = nn.Dropout(rate=self.dropout)
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
        if self.causal:
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
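`_split_heads` and `_merge_heads` are pure reshapes between `(batch, seq, embed_dim)` and `(batch, seq, num_heads, head_dim)`. A shape-only sketch of that round trip, assuming toy dimensions `embed_dim=8` and `num_heads=2`:

import jax.numpy as jnp

batch, seq, embed_dim, num_heads = 1, 3, 8, 2
head_dim = embed_dim // num_heads

hidden = jnp.arange(batch * seq * embed_dim, dtype=jnp.float32).reshape(batch, seq, embed_dim)

split = hidden.reshape(hidden.shape[:2] + (num_heads, head_dim))   # (1, 3, 2, 4)
merged = split.reshape(split.shape[:2] + (embed_dim,))             # back to (1, 3, 8)

assert merged.shape == hidden.shape
assert bool((merged == hidden).all())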
    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to
        cached states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend
            # to those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)

        return key, value, attention_mask
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
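The cache update writes the freshly projected key/value slice at position `cache_index` via `lax.dynamic_update_slice`, then builds a mask so the query cannot attend to cache slots that have not been written yet. A standalone sketch of just that index and mask arithmetic, with assumed toy dimensions:

import jax.numpy as jnp
from jax import lax

max_length, num_heads, head_dim = 6, 2, 4
cached_key = jnp.zeros((1, max_length, num_heads, head_dim))  # (batch, max_length, heads, head_dim)
cur_index = 3                                                  # three positions already filled

new_key = jnp.ones((1, 1, num_heads, head_dim))                # one freshly projected token
cached_key = lax.dynamic_update_slice(cached_key, new_key, (0, cur_index, 0, 0))

num_updated = new_key.shape[1]
pad_mask = jnp.broadcast_to(
    jnp.arange(max_length) < cur_index + num_updated,
    (1, num_updated, max_length),
)
print(pad_mask)  # [[[ True  True  True  True False False]]] -> slots 4 and 5 stay masked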
    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
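The same `k_proj`/`v_proj` layers serve both modes: in self-attention they project the decoder's own `hidden_states`, while in cross-attention they project the encoder's `key_value_states`, so the key/value length follows the source sequence. A shape-only sketch with a single dummy projection (all dimensions are assumptions for illustration):

import jax
import jax.numpy as jnp
import flax.linen as nn

embed_dim = 8
k_proj = nn.Dense(embed_dim)
params = k_proj.init(jax.random.PRNGKey(0), jnp.ones((1, 1, embed_dim)))

decoder_hidden = jnp.ones((1, 5, embed_dim))   # (batch, tgt_len, embed_dim)
encoder_hidden = jnp.ones((1, 9, embed_dim))   # (batch, src_len, embed_dim)

self_attn_keys = k_proj.apply(params, decoder_hidden)    # (1, 5, 8): keys come from the decoder itself
cross_attn_keys = k_proj.apply(params, encoder_hidden)   # (1, 9, 8): keys come from the encoder output
print(self_attn_keys.shape, cross_attn_keys.shape)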