Dataset columns: text (string, lengths 1 to 1.02k), class_index (int64, 0 to 10.8k), source (string, lengths 85 to 188)
query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask )
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, )
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights
10,533
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
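As a quick sanity check on the einsum in the row above, here is a small self-contained sketch with toy shapes (illustrative only): `dot_product_attention_weights` returns weights with the head axis before the query axis, while the values keep the `(batch, kv_len, heads, head_dim)` layout produced by `_split_heads`.

```python
import numpy as np
import jax.numpy as jnp

batch, heads, q_len, kv_len, head_dim = 2, 4, 3, 5, 8
attn_weights = jnp.asarray(np.random.rand(batch, heads, q_len, kv_len))
value_states = jnp.asarray(np.random.rand(batch, kv_len, heads, head_dim))

# Contract over the key axis k; the result is (batch, q_len, heads, head_dim),
# which is the layout _merge_heads expects before the output projection.
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
assert attn_output.shape == (batch, q_len, heads, head_dim)
```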
class FlaxXGLMDecoderLayer(nn.Module): config: XGLMConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxXGLMAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
10,534
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if self.config.add_cross_attention: self.encoder_attn = FlaxXGLMAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense( self.config.ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
10,534
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer.__call__ def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states
10,534
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states
10,534
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
10,534
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
class FlaxXGLMDecoderLayerCollection(nn.Module): config: XGLMConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxXGLMDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_layers) ] self.layerdrop = self.config.layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
10,535
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],)
10,535
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_self_attns, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, )
10,535
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
class FlaxXGLMModule(nn.Module): config: XGLMConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 self.embed_tokens = nn.Embed( self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), )
10,536
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# XGLM is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 self.embed_positions = create_sinusoidal_positions( self.config.max_position_embeddings + self.offset, embed_dim ) self.layers = FlaxXGLMDecoderLayerCollection(self.config, self.dtype) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1])
10,536
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # embed positions position_ids = position_ids + self.offset positions = jnp.take(self.embed_positions, position_ids, axis=0) hidden_states = inputs_embeds + positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states)
10,536
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
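To make the XGLM position-offset hack concrete, a tiny illustration (toy values, not the model's actual table): position ids are shifted by 2 before indexing the sinusoidal table, which is why the table is created with `max_position_embeddings + offset` rows.

```python
import jax.numpy as jnp

offset = 2
position_ids = jnp.arange(5)[None, :]   # [[0 1 2 3 4]]
shifted = position_ids + offset         # [[2 3 4 5 6]] -> rows read from embed_positions
```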
hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
10,536
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
class FlaxXGLMPreTrainedModel(FlaxPreTrainedModel): config_class = XGLMConfig base_model_prefix: str = "model" module_class: nn.Module = None def __init__( self, config: XGLMConfig, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng}
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) random_params = module_init_outputs["params"]
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"])
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
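A hedged usage sketch of `init_cache` (it assumes the public `facebook/xglm-564M` checkpoint and an installed Flax backend; the exact nesting of the returned dict is an implementation detail, so the example only inspects shapes):

```python
import jax
from transformers import FlaxXGLMForCausalLM

model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

# Pre-allocate key/value buffers for a batch of 2 sequences of up to 16 tokens.
cache = model.init_cache(batch_size=2, max_length=16)

# Each decoder layer gets zero-initialised cached_key/cached_value buffers plus a
# scalar cache_index; printing the shapes reveals the (batch, max_length, heads, head_dim) layout.
print(jax.tree_util.tree_map(lambda x: x.shape, cache))
```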
@add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, past_key_values: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if encoder_hidden_states is not None and encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} inputs = {"params": params or self.params}
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# if past_key_values are passed, the cache is already initialized; the private flag init_cache has to be passed # down to ensure the cache is used. The cache also has to be marked as mutable so that it can be # changed by the FlaxXGLMAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, )
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
# add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] return outputs
10,537
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
class FlaxXGLMModel(FlaxXGLMPreTrainedModel): module_class = FlaxXGLMModule
10,538
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
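A minimal forward-pass sketch for the bare model (assuming the public `facebook/xglm-564M` checkpoint and an installed Flax backend):

```python
from transformers import AutoTokenizer, FlaxXGLMModel

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = FlaxXGLMModel.from_pretrained("facebook/xglm-564M")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, d_model)
```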
class FlaxXGLMForCausalLMModule(nn.Module): config: XGLMConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.model = FlaxXGLMModule(self.config, self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), )
10,539
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0]
10,539
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
if self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["embed_tokens"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
10,539
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
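The `tie_word_embeddings` branch above reuses the input embedding matrix as the output projection by passing its transpose to `lm_head.apply` as the kernel. A self-contained sketch of the equivalent computation, using random toy shapes rather than the model's parameters:

```python
import numpy as np
import jax.numpy as jnp

vocab_size, d_model, seq_len = 8, 4, 3
embedding = jnp.asarray(np.random.randn(vocab_size, d_model))      # embed_tokens.embedding
hidden_states = jnp.asarray(np.random.randn(1, seq_len, d_model))

# A bias-free Dense layer whose kernel is embedding.T computes exactly this projection,
# so the logit for token i is the dot product of the hidden state with embedding row i.
lm_logits = hidden_states @ embedding.T                             # (1, seq_len, vocab_size)
assert lm_logits.shape == (1, seq_len, vocab_size)
```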
class FlaxXGLMForCausalLM(FlaxXGLMPreTrainedModel): module_class = FlaxXGLMForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape
10,540
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since XGLM uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, }
10,540
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
10,540
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_flax_xglm.py
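A hedged end-to-end generation sketch (again assuming the `facebook/xglm-564M` checkpoint): `generate` calls `prepare_inputs_for_generation` once to build the static attention mask and the pre-allocated cache, then `update_inputs_for_generation` after every decoding step.

```python
from transformers import AutoTokenizer, FlaxXGLMForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer("The capital of France is", return_tensors="np")
output = model.generate(inputs["input_ids"], max_length=20, do_sample=False)
print(tokenizer.decode(output.sequences[0], skip_special_tokens=True))
```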
class TFXGLMAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel"""
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states)
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape)
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
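For intuition about the `_shape` helper and the `proj_shape` reshape above, a standalone shape walkthrough with toy sizes: folding batch and heads into a single leading axis is what lets one batched matmul cover every head.

```python
import tensorflow as tf

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
x = tf.random.normal((bsz, seq_len, num_heads * head_dim))

# _shape: (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
x = tf.transpose(tf.reshape(x, (bsz, seq_len, num_heads, head_dim)), (0, 2, 1, 3))

# proj_shape: fold batch and heads together -> (bsz * num_heads, seq, head_dim)
x = tf.reshape(x, (bsz * num_heads, seq_len, head_dim))
assert x.shape == (bsz * num_heads, seq_len, head_dim)
```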
src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), )
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim])
10,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class TFXGLMDecoderLayer(keras.layers.Layer): def __init__(self, config: XGLMConfig, **kwargs: Any) -> None: super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFXGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, name="self_attn", ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
if config.add_cross_attention: self.encoder_attn = TFXGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, name="encoder_attn", ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization( epsilon=1e-5, name="encoder_attn_layer_norm" ) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer.call def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Tuple[tf.Tensor] | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`):
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(decoder_attention_heads,)* cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. *(decoder_attention_heads,)* past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states)
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states)
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, )
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim])
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
10,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class TFXGLMMainLayer(keras.layers.Layer): config_class = XGLMConfig def __init__( self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs, **kwargs: Any ) -> None: super().__init__(*inputs, **kwargs) self.config = config self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = TFSharedEmbeddings( config.vocab_size, config.d_model, self.padding_idx, name="embed_tokens" ) self.offset = 2 self._embed_positions_weights = create_sinusoidal_positions( num_positions=config.max_position_embeddings + self.offset, embedding_dim=config.d_model, padding_idx=config.pad_token_id, )
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
self.dropout = keras.layers.Dropout(config.dropout) self.layers = [TFXGLMDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_layers)] self.layerdrop = config.layerdrop self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def get_input_embeddings(self) -> TFSharedEmbeddings: return self.embed_tokens def set_input_embeddings(self, value: TFSharedEmbeddings) -> None: self.embed_tokens = value
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
def _prepare_decoder_attention_mask( self, attention_mask: tf.Tensor | None, input_shape: tf.TensorShape, past_key_values_length: int, ) -> tf.Tensor: # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length) combined_attention_mask = tf.cond( input_shape[-1] > 1, lambda: combined_attention_mask, lambda: tf.ones_like(combined_attention_mask) ) if attention_mask is None: return combined_attention_mask expand_attention_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) return expand_attention_mask + combined_attention_mask def embed_positions(self, position_ids: np.ndarray | tf.Tensor | None = None) -> tf.Tensor: position_ids += self.offset positions = tf.gather(self._embed_positions_weights, position_ids, axis=0) return positions
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
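For intuition, a standalone sketch of the additive causal mask that `_prepare_decoder_attention_mask` ultimately produces (the real `_make_causal_mask`/`_expand_mask` helpers additionally handle `past_key_values_length`, padding masks, and dtype details, so this is illustrative only):

```python
import tensorflow as tf

seq_len = 4
# Lower-triangular ones: position i may attend to positions <= i.
causal = tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
# Additive form: 0 where attention is allowed, a very large negative value where it is not.
additive_mask = (1.0 - causal) * tf.float32.min
# Broadcast to [bsz, 1, tgt_seq_len, src_seq_len] before adding it to the attention scores.
additive_mask = additive_mask[tf.newaxis, tf.newaxis, :, :]
```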
@unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs: Any, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if position_ids is None: position_ids = tf.expand_dims( tf.range(past_key_values_length, input_shape[-1] + past_key_values_length), axis=0 ) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(position_ids) hidden_states = tf.cast(inputs_embeds, dtype=tf.float32) + positions hidden_states = self.dropout(hidden_states, training=training)
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." ), )
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, ) if use_cache: next_decoder_cache += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attentions += (layer_cross_attn,) hidden_states = self.layer_norm(hidden_states)
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
# add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, )
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
10,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class TFXGLMPreTrainedModel(TFPreTrainedModel): config_class = XGLMConfig base_model_prefix = "model"
10,544
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class TFXGLMModel(TFXGLMPreTrainedModel): """ Transformer decoder consisting of *config.num_layers* layers. Each layer is a [`TFXGLMDecoderLayer`] Args: config: XGLMConfig embed_tokens: [TFSharedEmbeddings]: output embedding """ def __init__( self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any ) -> None: super().__init__(config, *inputs, **kwargs) self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model")
10,545
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
@unpack_inputs @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None,
10,545
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs: Any, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
10,545
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None)
10,545
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class TFXGLMForCausalLM(TFXGLMPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"model.embed_positions.weights", r"lm_head.weight", ] _keys_to_ignore_on_save = [ r"model.embed_positions.weights", ] def __init__( self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any ) -> None: super().__init__(config, *inputs, **kwargs) self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model") self.lm_head = keras.layers.Dense( config.vocab_size, use_bias=False, kernel_initializer=get_initializer(config.init_std), name="lm_head", ) self.config = config def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): # only keep the last token of input_ids if past_key_values is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, }
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
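A small check of the exclusive-cumsum trick used above to derive `position_ids` from a (possibly left-padded) attention mask:

```python
import tensorflow as tf

# Left-padded batch: padding positions stay at 0, real tokens count up from 0.
attention_mask = tf.constant([[0, 0, 1, 1, 1]])
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
print(position_ids.numpy())  # [[0 0 0 1 2]]
```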
@unpack_inputs @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None,
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs: Any, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]` """
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = outputs[0] lm_logits = self.lm_head(hidden_states)
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
loss = None if labels is not None: # shift labels to the left and cut last logit token labels = tf.concat( [labels[:, 1:], tf.fill((labels.shape[0], 1), tf.cast(self.config.pad_token_id, labels.dtype))], axis=-1, ) loss = self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
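The loss path above shifts the labels one step to the left so that position t predicts token t+1, filling the final slot with the pad token; a quick numeric check of that concat:

```python
import tensorflow as tf

pad_token_id = 1
labels = tf.constant([[5, 6, 7, 8]])
shifted = tf.concat(
    [labels[:, 1:], tf.fill((labels.shape[0], 1), tf.cast(pad_token_id, labels.dtype))],
    axis=-1,
)
print(shifted.numpy())  # [[6 7 8 1]]
```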
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.config.hidden_size]) def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == "lm_head.weight": return tf_weight, "model.embed_tokens.weight" else: return (tf_weight,)
10,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_tf_xglm.py
class XGLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XGLMModel`]. It is used to instantiate an XGLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XGLM [facebook/xglm-564M](https://huggingface.co/facebook/xglm-564M) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
Args: vocab_size (`int`, *optional*, defaults to 256008): Vocabulary size of the XGLM model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`XGLMModel`] or [`FlaxXGLMModel`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). d_model (`int`, *optional*, defaults to 1024): Dimension of the layers and the pooler layer. ffn_dim (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (often named feed-forward) layer in the decoder. num_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer decoder. attention_heads (`int`, *optional*, defaults to 16):
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
Number of attention heads for each attention layer in the Transformer decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, decoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by dividing by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models).
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
Example: ```python >>> from transformers import XGLMModel, XGLMConfig >>> # Initializing an XGLM facebook/xglm-564M style configuration >>> configuration = XGLMConfig() >>> # Initializing a model from the facebook/xglm-564M style configuration >>> model = XGLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xglm" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "d_model", "num_hidden_layers": "num_layers", }
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
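The `attribute_map` above lets code written against the generic `PretrainedConfig` attribute names read XGLM's own fields; a quick check using the defaults listed below:

```python
from transformers import XGLMConfig

config = XGLMConfig()
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.attention_heads == 16
assert config.num_hidden_layers == config.num_layers == 24
```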
def __init__( self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.ffn_dim = ffn_dim self.num_layers = num_layers self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
10,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/configuration_xglm.py
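The configuration above exposes the XGLM hyperparameters directly and also aliases the generic Transformers names onto them through `attribute_map`. A minimal sketch of that behavior, assuming a working `transformers` install; the reduced sizes are arbitrary illustration values, not those of the released checkpoint:

```python
from transformers import XGLMConfig

# Hypothetical, smaller-than-default configuration for quick experiments.
config = XGLMConfig(d_model=256, ffn_dim=1024, num_layers=4, attention_heads=8)

# attribute_map aliases the generic names onto the XGLM-specific ones.
assert config.hidden_size == config.d_model == 256
assert config.num_hidden_layers == config.num_layers == 4
assert config.num_attention_heads == config.attention_heads == 8
```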
class XGLMTokenizer(PreTrainedTokenizer):
    """
    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
            The token used for padding, for example when batching sequences of different lengths.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
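The `sp_model_kwargs` described above are forwarded verbatim to `SentencePieceProcessor`, so subword regularization can be switched on at construction time. A hedged sketch, assuming a `transformers` install with SentencePiece; the vocabulary path is a placeholder and must point at a real SentencePiece model file before this runs:

```python
from transformers import XGLMTokenizer

# "sentencepiece.bpe.model" is a placeholder path, not a shipped file.
tokenizer = XGLMTokenizer(
    vocab_file="sentencepiece.bpe.model",
    sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
)

# With sampling enabled, repeated calls may yield different segmentations of the same text.
print(tokenizer.tokenize("subword regularization"))
```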
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |    2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------- | ------- | --- | --- | --- | ----- | ----- | -----
        # fairseq  | '<s>'   | '<pad>' | '</s>'  | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>'  | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
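The offset bookkeeping above can be illustrated standalone: every SentencePiece id is shifted up by `fairseq_offset` so that ids 0-3 stay reserved for the fairseq control tokens, and the madeup words are appended after the shifted SentencePiece range. A small sketch of the same arithmetic, independent of the tokenizer class; the SentencePiece vocabulary size is a made-up number for illustration:

```python
fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

sp_size = 254996  # illustrative SentencePiece vocab size, not the real checkpoint's
madeup_words = {f"<madeupword{i}>": sp_size + i + fairseq_offset for i in range(7)}

# A regular piece with SentencePiece id 3 (the first "real" token, ",")
# ends up at fairseq position 3 + 1 = 4, matching the alignment table above.
spm_id = 3
print(spm_id + fairseq_offset)            # 4
print(madeup_words["<madeupword0>"])      # 254997, appended after the shifted spm range
```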
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An XGLM sequence has the following format:

        - single sequence: `</s> X`
        - pair of sequences: `</s> A </s></s> B`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
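Since the method above builds its output purely from `sep_token_id`, the result can be sketched with plain lists. A hedged illustration; the token ids for the two sequences are chosen arbitrarily, and `sep_token_id` is 2 because `</s>` sits at fairseq position 2 in the alignment table earlier:

```python
sep_token_id = 2  # "</s>" in the fairseq-aligned vocab shown above

def build_inputs(token_ids_0, token_ids_1=None):
    # Mirrors XGLMTokenizer.build_inputs_with_special_tokens
    if token_ids_1 is None:
        return [sep_token_id] + token_ids_0
    sep = [sep_token_id]
    return sep + token_ids_0 + sep + sep + token_ids_1

print(build_inputs([10, 11]))             # [2, 10, 11]
print(build_inputs([10, 11], [20, 21]))   # [2, 10, 11, 2, 2, 20, 21]
```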
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. XGLM does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
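The comment "Need to return unknown token if the SP model returned 0" in the chunk above relies on SentencePiece reporting id 0 for out-of-vocabulary pieces. A small stand-alone sketch of that fallback; the id 3 for `<unk>` follows the fairseq alignment shown earlier, and the tiny piece-to-id dict is purely illustrative:

```python
fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

def convert_token_to_id(token, piece_to_id):
    # Mirrors XGLMTokenizer._convert_token_to_id, with a plain dict standing in for sp_model.PieceToId.
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = piece_to_id.get(token, 0)  # SentencePiece returns 0 for unknown pieces
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

piece_to_id = {",": 3, ".": 4}  # tiny illustrative SentencePiece vocab
print(convert_token_to_id(",", piece_to_id))          # 4 (shifted by fairseq_offset)
print(convert_token_to_id("oov-piece", piece_to_id))  # 3 (falls back to <unk>)
```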
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
10,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/tokenization_xglm.py
class XGLMScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying with the embedding scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale
10,549
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
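The scaled embedding above simply multiplies the looked-up vectors by a constant. A minimal, self-contained sketch of that behavior, assuming PyTorch is available; the sizes are arbitrary, and the `embed_scale = sqrt(d_model)` choice mirrors what `scale_embedding=True` implies in the configuration:

```python
import math

import torch
import torch.nn as nn


class ScaledWordEmbedding(nn.Embedding):
    """Stand-in for XGLMScaledWordEmbedding above, re-declared so the snippet is self-contained."""

    def __init__(self, num_embeddings, embedding_dim, padding_idx, embed_scale=1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids):
        return super().forward(input_ids) * self.embed_scale


d_model = 16  # arbitrary illustration size
emb = ScaledWordEmbedding(100, d_model, padding_idx=1, embed_scale=math.sqrt(d_model))
out = emb(torch.tensor([[5, 7, 9]]))
print(out.shape)  # torch.Size([1, 3, 16])
```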
class XGLMSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)
10,550
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())
10,550
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xglm/modeling_xglm.py
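The `get_embedding` helper above builds the full sine/cosine table up front. A quick re-derivation of the same arithmetic for an even embedding dimension (omitting the zero-padding and `padding_idx` handling), using small arbitrary sizes and assuming only PyTorch:

```python
import math

import torch

num_embeddings, embedding_dim = 6, 8  # arbitrary illustration sizes
half_dim = embedding_dim // 2

# Geometric progression of frequencies, as in get_embedding above.
freq = torch.exp(torch.arange(half_dim).float() * -(math.log(10000) / (half_dim - 1)))
angles = torch.arange(num_embeddings).float().unsqueeze(1) * freq.unsqueeze(0)
table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)

print(table.shape)  # torch.Size([6, 8])
print(table[0])     # position 0: the sine half is all zeros, the cosine half all ones
```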