Dataset columns:
- text: string (length 31–243k)
- type: string (1 distinct value)
- start: int64 (36–275k)
- end: int64 (286–280k)
- depth: int64 (0–1)
- filepath: string (length 85–188)
- parent_class: string (3 distinct values)
- class_index: int64 (0–10.8k)
class FlaxMultiHeadSelfAttention(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.n_heads = self.config.n_heads self.dim = self.config.dim self.dropout = nn.Dropout(rate=self.config.attention_dropout) if not (self.dim % self.n_heads == 0): raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}") self.q_lin = nn.Dense( self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.k_lin = nn.Dense( self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.v_lin = nn.Dense( self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.out_lin = nn.Dense( self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, query, key, value, mask, deterministic: bool = True, output_attentions: bool = False, ): bs, q_len, dim = query.shape k_len = key.shape[1] # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_len) def shape(x): """separate heads""" return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3) def unshape(x): """group heads""" return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head) scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len) mask = jnp.reshape(mask, mask_reshp) mask = mask.astype(scores.dtype) scores = scores - 1e30 * (1.0 - mask) weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len) weights = self.dropout(weights, deterministic=deterministic) context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head) context = unshape(context) # (bs, q_len, dim) context = self.out_lin(context) # (bs, q_len, dim) if output_attentions: return (context, weights) else: return (context,)
type: class_definition | start: 6,836 | end: 9,769 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,300
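The `scores - 1e30 * (1.0 - mask)` line in the row above is an additive attention mask. A minimal standalone sketch of that trick (toy values, not the module itself):

```python
# Padded key positions get a huge negative score, so softmax assigns them ~0 weight.
import jax
import jax.numpy as jnp

scores = jnp.array([[2.0, 1.0, 0.5]])   # raw attention scores for one query over 3 keys
mask = jnp.array([[1.0, 1.0, 0.0]])     # 1 = real token, 0 = padding
weights = jax.nn.softmax(scores - 1e30 * (1.0 - mask), axis=-1)
print(weights)                          # the padded position receives ~0 attention
```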
class FlaxFFN(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout = nn.Dropout(rate=self.config.dropout) self.chunk_size_feed_forward = self.config.chunk_size_feed_forward self.seq_len_dim = 1 self.lin1 = nn.Dense( self.config.hidden_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.lin2 = nn.Dense( self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.activation = ACT2FN[self.config.activation] def __call__(self, hidden_states, deterministic: bool = True): hidden_states = self.lin1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.lin2(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states
type: class_definition | start: 9,772 | end: 10,853 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,301
class FlaxTransformerBlock(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): assert ( self.config.dim % self.config.n_heads == 0 ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}" self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype) self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) self.ffn = FlaxFFN(self.config, dtype=self.dtype) self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) def __call__( self, hidden_states, attn_mask, output_attentions: bool = False, deterministic: bool = True, ): # Self-Attention sa_output = self.attention( query=hidden_states, key=hidden_states, value=hidden_states, mask=attn_mask, output_attentions=output_attentions, deterministic=deterministic, ) if output_attentions: sa_output, sa_weights = sa_output else: assert type(sa_output) is tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + hidden_states) # Feed Forward Network ffn_output = self.ffn(sa_output, deterministic=deterministic) ffn_output = self.output_layer_norm(ffn_output + sa_output) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output
type: class_definition | start: 10,856 | end: 12,459 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,302
class FlaxTransformer(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers) ] def __call__( self, hidden_states, attention_mask, output_attentions: bool = False, output_hidden_states: bool = False, deterministic: bool = True, return_dict: bool = False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for layer_module in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, attn_mask=attention_mask, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[-1] if output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1 # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions )
type: class_definition | start: 12,462 | end: 14,237 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,303
class FlaxTransformerEncoder(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxTransformer(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, output_attentions: bool = False, output_hidden_states: bool = False, deterministic: bool = True, return_dict: bool = False, ): return self.layer( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, return_dict=return_dict, )
type: class_definition | start: 14,240 | end: 15,012 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,304
class FlaxDistilBertLMDecoder(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, inputs, kernel): inputs = jnp.asarray(inputs, self.dtype) kernel = jnp.asarray(kernel, self.dtype) y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ()))) bias = jnp.asarray(self.bias, self.dtype) y = y + bias return y
type: class_definition | start: 15,015 | end: 15,636 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,305
class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig base_model_prefix = "distilbert" module_class: nn.Module = None def __init__( self, config: DistilBertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, head_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, )
type: class_definition | start: 15,639 | end: 18,606 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,306
class FlaxDistilBertModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype) self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict input_embeds = self.embeddings(input_ids, deterministic=deterministic) return self.transformer( hidden_states=input_embeds, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
type: class_definition | start: 18,609 | end: 19,898 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,307
class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertModule
type: class_definition | start: 20,071 | end: 20,168 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,308
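A hedged usage sketch for the model class above; the `distilbert-base-uncased` checkpoint name is an assumption (any DistilBERT checkpoint with Flax weights would do):

```python
from transformers import AutoTokenizer, FlaxDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)                       # input_ids + attention_mask
last_hidden_state = outputs.last_hidden_state   # (batch_size, sequence_length, dim)
```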
class FlaxDistilBertForMaskedLMModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype) self.vocab_transform = nn.Dense( self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) if self.config.tie_word_embeddings: self.vocab_projector = FlaxDistilBertLMDecoder( self.config, dtype=self.dtype, ) else: self.vocab_projector = nn.Dense( self.config.vocab_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict dlbrt_output = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, return_dict=return_dict, ) hidden_states = dlbrt_output[0] prediction_logits = self.vocab_transform(hidden_states) prediction_logits = ACT2FN[self.config.activation](prediction_logits) prediction_logits = self.vocab_layer_norm(prediction_logits) if self.config.tie_word_embeddings: shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T) else: prediction_logits = self.vocab_projector(prediction_logits) if not return_dict: output = (prediction_logits,) + dlbrt_output[1:] return output return FlaxMaskedLMOutput( logits=prediction_logits, hidden_states=dlbrt_output.hidden_states, attentions=dlbrt_output.attentions, )
type: class_definition | start: 20,267 | end: 22,742 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,309
class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForMaskedLMModule
type: class_definition | start: 22,864 | end: 22,978 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,310
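A similar hedged sketch for the masked-LM head; again the checkpoint name is an assumption:

```python
from transformers import AutoTokenizer, FlaxDistilBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits                  # (batch_size, seq_len, vocab_size)

mask_position = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(logits[0, mask_position].argmax(-1))
print(tokenizer.decode([predicted_id]))
```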
class FlaxDistilBertForSequenceClassificationModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.pre_classifier = nn.Dense( self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, ) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Model distilbert_output = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = distilbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) pooled_output = ACT2FN["relu"](pooled_output) pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) # (bs, dim) if not return_dict: return (logits,) + distilbert_output[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, )
type: class_definition | start: 23,097 | end: 25,017 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,311
class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForSequenceClassificationModule
type: class_definition | start: 25,256 | end: 25,398 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,312
class FlaxDistilBertForMultipleChoiceModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.pre_classifier = nn.Dense( self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout) self.classifier = nn.Dense( 1, dtype=self.dtype, ) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None # Model outputs = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = ACT2FN["relu"](pooled_output) pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
type: class_definition | start: 25,560 | end: 27,677 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,313
class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForMultipleChoiceModule
type: class_definition | start: 27,925 | end: 28,051 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,314
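The multiple-choice module above folds the choice dimension into the batch before calling the encoder and unfolds it afterwards. A shape-only sketch with dummy arrays (no model call):

```python
import numpy as np

batch_size, num_choices, seq_len = 2, 4, 16
input_ids = np.zeros((batch_size, num_choices, seq_len), dtype="i4")

flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])   # (8, 16): choices folded into the batch
per_choice_scores = np.zeros((flat_input_ids.shape[0], 1))    # classifier emits one score per (example, choice)
reshaped_logits = per_choice_scores.reshape(-1, num_choices)  # (2, 4): one row of choice scores per example
print(flat_input_ids.shape, reshaped_logits.shape)
```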
class FlaxDistilBertForTokenClassificationModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Model outputs = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
type: class_definition | start: 28,350 | end: 29,753 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,315
class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForTokenClassificationModule
type: class_definition | start: 29,999 | end: 30,135 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,316
class FlaxDistilBertForQuestionAnsweringModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) assert self.config.num_labels == 2 self.dropout = nn.Dropout(rate=self.config.qa_dropout) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Model distilbert_output = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = distilbert_output[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + distilbert_output[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, )
type: class_definition | start: 30,291 | end: 32,034 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,317
class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForQuestionAnsweringModule
type: class_definition | start: 32,338 | end: 32,470 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py | parent_class: null | class_index: 8,318
class DistilBertTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = DistilBertTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase", do_lower_case) != do_lower_case or normalizer_state.get("strip_accents", strip_accents) != strip_accents or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars ): normalizer_class = getattr(normalizers, normalizer_state.pop("type")) normalizer_state["lowercase"] = do_lower_case normalizer_state["strip_accents"] = strip_accents normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
type: class_definition | start: 1,015 | end: 8,036 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/distilbert/tokenization_distilbert_fast.py | parent_class: null | class_index: 8,319
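A hedged usage sketch for the fast tokenizer above; the checkpoint name is an assumption:

```python
from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")

# Sequence pairs are wrapped as [CLS] A [SEP] B [SEP]; DistilBERT only consumes
# input_ids and attention_mask, so no token_type_ids are returned by default.
encoded = tokenizer("How are you?", "I am fine.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
```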
class DacFeatureExtractor(SequenceFeatureExtractor): r""" Constructs an Dac feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used for padding. hop_length (`int`, *optional*, defaults to 512): Overlap length between successive windows. """ model_input_names = ["input_values", "n_quantizers"] def __init__( self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, hop_length: int = 512, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.hop_length = hop_length def __call__( self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2: raise ValueError("Stereo audio isn't supported for now") input_values = BatchFeature({"input_values": raw_audio}) # normal padding on batch padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=False, pad_to_multiple_of=self.hop_length, ) if padding: padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :] input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
type: class_definition | start: 967 | end: 7,910 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/feature_extraction_dac.py | parent_class: null | class_index: 8,320
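A hedged usage sketch for the feature extractor; the `descript/dac_16khz` checkpoint name comes from the DAC docstrings elsewhere in this dataset, and the audio is random noise purely for illustration:

```python
import numpy as np
from transformers import DacFeatureExtractor

feature_extractor = DacFeatureExtractor.from_pretrained("descript/dac_16khz")

raw_audio = np.random.randn(16000).astype(np.float32)   # 1 s of mono audio at 16 kHz
inputs = feature_extractor(raw_audio=raw_audio, sampling_rate=16000, return_tensors="pt")
# The time axis is padded up to the next multiple of hop_length (512), i.e. 16384 samples here.
print(inputs["input_values"].shape)
```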
class DacConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`DacModel`]. It is used to instantiate a Dac model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [descript/dac_16khz](https://huggingface.co/descript/dac_16khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: encoder_hidden_size (`int`, *optional*, defaults to 64): Intermediate representation dimension for the encoder. downsampling_ratios (`List[int]`, *optional*, defaults to `[2, 4, 8, 8]`): Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 1536): Intermediate representation dimension for the decoder. n_codebooks (`int`, *optional*, defaults to 9): Number of codebooks in the VQVAE. codebook_size (`int`, *optional*, defaults to 1024): Number of discrete codes in each codebook. codebook_dim (`int`, *optional*, defaults to 8): Dimension of the codebook vectors. If not defined, uses `encoder_hidden_size`. quantizer_dropout (`bool`, *optional*, defaults to 0): Whether to apply dropout to the quantizer. commitment_loss_weight (float, *optional*, defaults to 0.25): Weight of the commitment loss term in the VQVAE loss function. codebook_loss_weight (float, *optional*, defaults to 1.0): Weight of the codebook loss term in the VQVAE loss function. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). Example: ```python >>> from transformers import DacModel, DacConfig >>> # Initializing a "descript/dac_16khz" style configuration >>> configuration = DacConfig() >>> # Initializing a model (with random weights) from the "descript/dac_16khz" style configuration >>> model = DacModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "dac" def __init__( self, encoder_hidden_size=64, downsampling_ratios=[2, 4, 8, 8], decoder_hidden_size=1536, n_codebooks=9, codebook_size=1024, codebook_dim=8, quantizer_dropout=0, commitment_loss_weight=0.25, codebook_loss_weight=1.0, sampling_rate=16000, **kwargs, ): self.encoder_hidden_size = encoder_hidden_size self.downsampling_ratios = downsampling_ratios self.decoder_hidden_size = decoder_hidden_size self.upsampling_ratios = downsampling_ratios[::-1] self.n_codebooks = n_codebooks self.codebook_size = codebook_size self.codebook_dim = codebook_dim self.quantizer_dropout = quantizer_dropout self.sampling_rate = sampling_rate self.hidden_size = encoder_hidden_size * (2 ** len(downsampling_ratios)) self.hop_length = int(np.prod(downsampling_ratios)) self.commitment_loss_weight = commitment_loss_weight self.codebook_loss_weight = codebook_loss_weight super().__init__(**kwargs) @property def frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length)
type: class_definition | start: 826 | end: 4,554 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/configuration_dac.py | parent_class: null | class_index: 8,321
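A worked example of the derived attributes computed in `DacConfig.__init__` for the default 16 kHz settings shown above:

```python
import math
import numpy as np

encoder_hidden_size = 64
downsampling_ratios = [2, 4, 8, 8]
sampling_rate = 16000

hidden_size = encoder_hidden_size * (2 ** len(downsampling_ratios))  # 64 * 16 = 1024
hop_length = int(np.prod(downsampling_ratios))                       # 2 * 4 * 8 * 8 = 512
frame_rate = math.ceil(sampling_rate / hop_length)                   # ceil(16000 / 512) = 32

print(hidden_size, hop_length, frame_rate)  # 1024 512 32
```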
class DacOutput(ModelOutput): """ Args: loss (`torch.Tensor`): Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses. audio_values (`torch.Tensor` of shape `(batch_size, input_length)`): Reconstructed audio data. quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized continuous representation of input. audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`): Codebook indices for each codebook (quantized discrete representation of input). projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`): Projected latents (continuous representation of input before quantization). """ loss: torch.FloatTensor = None audio_values: torch.FloatTensor = None quantized_representation: torch.FloatTensor = None audio_codes: torch.LongTensor = None projected_latents: torch.FloatTensor = None
type: class_definition | start: 1,126 | end: 2,203 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,322
class DacEncoderOutput(ModelOutput): """ Args: loss (`torch.Tensor`): Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses. quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*): Quantized continuous representation of input. audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*): Codebook indices for each codebook (quantized discrete representation of input). projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`, *optional*): Projected latents (continuous representation of input before quantization). """ loss: torch.FloatTensor = None quantized_representation: torch.FloatTensor = None audio_codes: torch.FloatTensor = None projected_latents: torch.FloatTensor = None
type: class_definition | start: 2,217 | end: 3,176 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,323
class DacDecoderOutput(ModelOutput): """ Args: audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*): Decoded audio values, obtained using the decoder part of Dac. """ audio_values: torch.FloatTensor = None
type: class_definition | start: 3,318 | end: 3,593 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,324
class Snake1d(nn.Module): """ A 1-dimensional Snake activation function module. """ def __init__(self, hidden_dim): super().__init__() self.alpha = nn.Parameter(torch.ones(1, hidden_dim, 1)) def forward(self, hidden_states): shape = hidden_states.shape hidden_states = hidden_states.reshape(shape[0], shape[1], -1) hidden_states = hidden_states + (self.alpha + 1e-9).reciprocal() * torch.sin(self.alpha * hidden_states).pow(2) hidden_states = hidden_states.reshape(shape) return hidden_states
type: class_definition | start: 3,596 | end: 4,166 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,325
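The forward pass above is the per-channel snake activation, x + (1/alpha) * sin(alpha * x)^2, with a small epsilon for numerical stability. A standalone sketch of the same computation:

```python
import torch

alpha = torch.ones(1, 4, 1)   # one learnable alpha per channel, as in Snake1d
x = torch.randn(2, 4, 16)     # (batch, channels, time)
out = x + (alpha + 1e-9).reciprocal() * torch.sin(alpha * x).pow(2)
print(out.shape)              # torch.Size([2, 4, 16])
```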
class DacVectorQuantize(nn.Module): """ Implementation of VQ similar to Karpathy's repo (https://github.com/karpathy/deep-vector-quantization) Additionally uses following tricks from improved VQGAN (https://arxiv.org/pdf/2110.04627.pdf): 1. Factorized codes: Perform nearest neighbor lookup in low-dimensional space for improved codebook usage 2. l2-normalized codes: Converts euclidean distance to cosine similarity which improves training stability """ def __init__(self, config: DacConfig): super().__init__() self.in_proj = nn.Conv1d(config.hidden_size, config.codebook_dim, kernel_size=1) self.out_proj = nn.Conv1d(config.codebook_dim, config.hidden_size, kernel_size=1) self.codebook = nn.Embedding(config.codebook_size, config.codebook_dim) def forward(self, hidden_state): """ Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors. Args: hidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`): Input tensor. Returns: quantized_representation (`torch.Tensor`of shape `(batch_size, dimension, time_steps)`): Quantized continuous representation of input. commitment_loss (`torch.FloatTensor`of shape `(1)`): Commitment loss to train encoder to predict vectors closer to codebook entries. codebook_loss (`torch.FloatTensor`of shape `(1)`): Codebook loss to update the codebook. audio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`): Codebook indices for each codebook, quantized discrete representation of input. projected_latents (torch.FloatTensor of shape `(batch_size, num_codebooks * dimension, time_steps)`): Projected latents (continuous representation of input before quantization). """ projected_latents = self.in_proj(hidden_state) quantized_representation, audio_codes = self.decode_latents(projected_latents) commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction="mean") codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction="mean") # noop in forward pass, straight-through gradient estimator in backward pass quantized_representation = projected_latents + (quantized_representation - projected_latents).detach() quantized_representation = self.out_proj(quantized_representation) return quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents def decode_latents(self, hidden_states): batch_size, hidden_dim, sequence_length = hidden_states.shape encodings = hidden_states.permute(0, 2, 1).reshape(batch_size * sequence_length, hidden_dim) codebook = self.codebook.weight # codebook: (N x D) # L2 normalize encodings and codebook (ViT-VQGAN) encodings = F.normalize(encodings) codebook = F.normalize(codebook) # Compute euclidean distance with codebook l2_norm = encodings.pow(2).sum(1, keepdim=True) dist = -(l2_norm - 2 * encodings @ codebook.t()) + codebook.pow(2).sum(1, keepdim=True).t() indices = dist.max(1)[1] indices = indices.reshape(hidden_states.size(0), -1) quantized_representation = self.codebook(indices).transpose(1, 2) return quantized_representation, indices
type: class_definition | start: 4,169 | end: 7,749 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,326
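The line `projected_latents + (quantized_representation - projected_latents).detach()` above is the straight-through gradient estimator. A minimal demonstration, with rounding as a toy stand-in for the non-differentiable codebook lookup:

```python
import torch

z = torch.tensor([0.3, 1.7, -2.2], requires_grad=True)  # "encoder output"
z_q = torch.round(z)                                     # toy quantizer, gradient-free

# Forward value equals z_q, but gradients flow to z as if quantization were the identity.
z_st = z + (z_q - z).detach()
z_st.sum().backward()
print(z_st, z.grad)   # quantized values; grad is all ones
```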
class DacResidualUnit(nn.Module): """ A residual unit composed of Snake1d and weight-normalized Conv1d layers with dilations. """ def __init__(self, dimension: int = 16, dilation: int = 1): super().__init__() pad = ((7 - 1) * dilation) // 2 self.snake1 = Snake1d(dimension) self.conv1 = nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad) self.snake2 = Snake1d(dimension) self.conv2 = nn.Conv1d(dimension, dimension, kernel_size=1) def forward(self, hidden_state): """ Forward pass through the residual unit. Args: hidden_state (`torch.Tensor` of shape `(batch_size, channels, time_steps)`): Input tensor . Returns: output_tensor (`torch.Tensor` of shape `(batch_size, channels, time_steps)`): Input tensor after passing through the residual unit. """ output_tensor = hidden_state output_tensor = self.conv1(self.snake1(output_tensor)) output_tensor = self.conv2(self.snake2(output_tensor)) padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2 if padding > 0: hidden_state = hidden_state[..., padding:-padding] output_tensor = hidden_state + output_tensor return output_tensor
type: class_definition | start: 7,752 | end: 9,106 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,327
class DacEncoderBlock(nn.Module): """Encoder block used in DAC encoder.""" def __init__(self, config: DacConfig, stride: int = 1, stride_index: int = 1): super().__init__() dimension = config.encoder_hidden_size * 2**stride_index self.res_unit1 = DacResidualUnit(dimension // 2, dilation=1) self.res_unit2 = DacResidualUnit(dimension // 2, dilation=3) self.res_unit3 = DacResidualUnit(dimension // 2, dilation=9) self.snake1 = Snake1d(dimension // 2) self.conv1 = nn.Conv1d( dimension // 2, dimension, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2) ) def forward(self, hidden_state): hidden_state = self.res_unit1(hidden_state) hidden_state = self.res_unit2(hidden_state) hidden_state = self.snake1(self.res_unit3(hidden_state)) hidden_state = self.conv1(hidden_state) return hidden_state
type: class_definition | start: 9,109 | end: 10,051 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,328
class DacDecoderBlock(nn.Module): """Decoder block used in DAC decoder.""" def __init__(self, config: DacConfig, stride: int = 1, stride_index: int = 1): super().__init__() input_dim = config.decoder_hidden_size // 2**stride_index output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1) self.snake1 = Snake1d(input_dim) self.conv_t1 = nn.ConvTranspose1d( input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2), ) self.res_unit1 = DacResidualUnit(output_dim, dilation=1) self.res_unit2 = DacResidualUnit(output_dim, dilation=3) self.res_unit3 = DacResidualUnit(output_dim, dilation=9) def forward(self, hidden_state): hidden_state = self.snake1(hidden_state) hidden_state = self.conv_t1(hidden_state) hidden_state = self.res_unit1(hidden_state) hidden_state = self.res_unit2(hidden_state) hidden_state = self.res_unit3(hidden_state) return hidden_state
type: class_definition | start: 10,054 | end: 11,150 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,329
class DacResidualVectorQuantize(nn.Module): """ ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec (https://arxiv.org/abs/2107.03312) """ def __init__(self, config: DacConfig): super().__init__() n_codebooks = config.n_codebooks quantizer_dropout = config.quantizer_dropout self.n_codebooks = n_codebooks self.quantizers = nn.ModuleList([DacVectorQuantize(config) for i in range(config.n_codebooks)]) self.quantizer_dropout = quantizer_dropout def forward(self, hidden_state, n_quantizers: int = None): """ Quantizes the input tensor using a fixed set of codebooks and returns corresponding codebook vectors. Args: hidden_state (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Input tensor to be quantized. n_quantizers (`int`, *optional*): Number of quantizers to use. If specified and `self.quantizer_dropout` is True, this argument is ignored during training, and a random number of quantizers is used. Returns: quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized continuous representation of input. audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`): Codebook indices for each codebook (quantized discrete representation of input). projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`): Projected latents (continuous representation of input before quantization). commitment_loss (`torch.Tensor` of shape `(1)`): Commitment loss to train the encoder to predict vectors closer to codebook entries. codebook_loss (`torch.Tensor` of shape `(1)`): Codebook loss to update the codebook. """ quantized_representation = 0 residual = hidden_state commitment_loss = 0 codebook_loss = 0 audio_codes = [] projected_latents = [] n_quantizers = n_quantizers if n_quantizers is not None else self.n_codebooks if self.training: n_quantizers = torch.ones((hidden_state.shape[0],)) * self.n_codebooks + 1 dropout = torch.randint(1, self.n_codebooks + 1, (hidden_state.shape[0],)) n_dropout = int(hidden_state.shape[0] * self.quantizer_dropout) n_quantizers[:n_dropout] = dropout[:n_dropout] n_quantizers = n_quantizers.to(hidden_state.device) for i, quantizer in enumerate(self.quantizers): if self.training is False and i >= n_quantizers: break quantized_representation_i, commitment_loss_i, codebook_loss_i, indices_i, projected_latents_i = quantizer( residual ) # Create mask to apply quantizer dropout mask = torch.full((hidden_state.shape[0],), fill_value=i, device=hidden_state.device) < n_quantizers quantized_representation = quantized_representation + quantized_representation_i * mask[:, None, None] residual = residual - quantized_representation_i # Sum losses commitment_loss += commitment_loss_i * mask codebook_loss += codebook_loss_i * mask audio_codes.append(indices_i) projected_latents.append(projected_latents_i) audio_codes = torch.stack(audio_codes, dim=1) projected_latents = torch.cat(projected_latents, dim=1) return quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss def from_codes(self, audio_codes: torch.Tensor): """ Reconstructs the continuous representation from quantized codes. Args: audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`): Quantized discrete representation of input. Returns: quantized_representation (`torch.Tensor`): Quantized continuous representation of input. 
projected_latents (`torch.Tensor`): List of projected latents (continuous representations of input before quantization) for each codebook. audio_codes (`torch.Tensor`): Codebook indices for each codebook. """ quantized_representation = 0.0 projected_latents = [] n_codebooks = audio_codes.shape[1] for i in range(n_codebooks): projected_latents_i = self.quantizers[i].codebook(audio_codes[:, i, :]).transpose(1, 2) projected_latents.append(projected_latents_i) quantized_representation += self.quantizers[i].out_proj(projected_latents_i) return quantized_representation, torch.cat(projected_latents, dim=1), audio_codes def from_latents(self, latents: torch.Tensor): """Reconstructs the quantized representation from unquantized latents. Args: latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`): Continuous representation of input after projection. Returns: quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized representation of the full-projected space. quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized representation of the latent space (continuous representation before quantization). """ quantized_representation = 0 quantized_latents = [] codes = [] codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers]) dims = torch.cumsum(codebook_dims_tensor, dim=0) n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0] for i in range(n_codebooks): hidden_dim_j, hidden_dim_k = dims[i], dims[i + 1] quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :]) quantized_latents.append(quantized_latents_i) codes.append(codes_i) quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i) quantized_representation = quantized_representation + quantized_representation_i return quantized_representation, torch.cat(quantized_latents, dim=1)
type: class_definition | start: 11,153 | end: 17,748 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,330
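The forward loop above accumulates quantizer outputs while each stage encodes the residual left by the previous ones. A toy sketch of that residual-quantization idea, using rounding at progressively finer step sizes in place of learned codebooks:

```python
import torch

def toy_quantize(x, step):
    # stand-in for a codebook lookup: snap to the nearest multiple of `step`
    return torch.round(x / step) * step

x = torch.tensor([0.37, -1.62, 2.05])
residual, reconstruction = x, torch.zeros_like(x)
for step in (1.0, 0.1, 0.01):               # three toy "codebooks", coarse to fine
    q_i = toy_quantize(residual, step)
    reconstruction = reconstruction + q_i    # summed like quantized_representation
    residual = residual - q_i                # the next stage sees only what is left
print(reconstruction)                        # ~x: each stage refines the previous one
```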
class DacDecoder(nn.Module): """DAC Decoder""" def __init__(self, config: DacConfig): super().__init__() input_channel = config.hidden_size channels = config.decoder_hidden_size strides = config.upsampling_ratios # Add first conv layer self.conv1 = nn.Conv1d(input_channel, channels, kernel_size=7, padding=3) # Add upsampling + MRF blocks block = [] for stride_index, stride in enumerate(strides): block += [DacDecoderBlock(config, stride, stride_index)] self.block = nn.ModuleList(block) output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1) self.snake1 = Snake1d(output_dim) self.conv2 = nn.Conv1d(output_dim, 1, kernel_size=7, padding=3) self.tanh = nn.Tanh() def forward(self, hidden_state): hidden_state = self.conv1(hidden_state) for layer in self.block: hidden_state = layer(hidden_state) hidden_state = self.snake1(hidden_state) hidden_state = self.conv2(hidden_state) hidden_state = self.tanh(hidden_state) return hidden_state
type: class_definition | start: 17,751 | end: 18,905 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,331
class DacEncoder(nn.Module): """DAC Encoder""" def __init__(self, config: DacConfig): super().__init__() strides = config.downsampling_ratios # Create first convolution self.conv1 = nn.Conv1d(1, config.encoder_hidden_size, kernel_size=7, padding=3) self.block = [] # Create EncoderBlocks that double channels as they downsample by `stride` for stride_index, stride in enumerate(strides): stride_index = stride_index + 1 self.block += [DacEncoderBlock(config, stride=stride, stride_index=stride_index)] self.block = nn.ModuleList(self.block) d_model = config.encoder_hidden_size * 2**stride_index self.snake1 = Snake1d(d_model) self.conv2 = nn.Conv1d(d_model, config.hidden_size, kernel_size=3, padding=1) def forward(self, hidden_state): hidden_state = self.conv1(hidden_state) for module in self.block: hidden_state = module(hidden_state) hidden_state = self.snake1(hidden_state) hidden_state = self.conv2(hidden_state) return hidden_state
type: class_definition | start: 18,908 | end: 20,032 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py | parent_class: null | class_index: 8,332
class DacPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DacConfig base_model_prefix = "dac" main_input_name = "input_values" def _init_weights(self, module): if isinstance(module, nn.Conv1d): nn.init.trunc_normal_(module.weight, std=0.02) nn.init.constant_(module.bias, 0) def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm for layer in self.quantizer.quantizers: weight_norm(layer.in_proj) weight_norm(layer.out_proj) weight_norm(self.encoder.conv1) weight_norm(self.encoder.conv2) for layer in self.encoder.block: weight_norm(layer.conv1) weight_norm(layer.res_unit1.conv1) weight_norm(layer.res_unit1.conv2) weight_norm(layer.res_unit2.conv1) weight_norm(layer.res_unit2.conv2) weight_norm(layer.res_unit3.conv1) weight_norm(layer.res_unit3.conv2) weight_norm(self.decoder.conv1) weight_norm(self.decoder.conv2) for layer in self.decoder.block: weight_norm(layer.conv_t1) weight_norm(layer.res_unit1.conv1) weight_norm(layer.res_unit1.conv2) weight_norm(layer.res_unit2.conv1) weight_norm(layer.res_unit2.conv2) weight_norm(layer.res_unit3.conv1) weight_norm(layer.res_unit3.conv2) def remove_weight_norm(self): for layer in self.quantizer.quantizers: nn.utils.remove_weight_norm(layer.in_proj) nn.utils.remove_weight_norm(layer.out_proj) nn.utils.remove_weight_norm(self.encoder.conv1) nn.utils.remove_weight_norm(self.encoder.conv2) for layer in self.encoder.block: nn.utils.remove_weight_norm(layer.conv1) nn.utils.remove_weight_norm(layer.res_unit1.conv1) nn.utils.remove_weight_norm(layer.res_unit1.conv2) nn.utils.remove_weight_norm(layer.res_unit2.conv1) nn.utils.remove_weight_norm(layer.res_unit2.conv2) nn.utils.remove_weight_norm(layer.res_unit3.conv1) nn.utils.remove_weight_norm(layer.res_unit3.conv2) nn.utils.remove_weight_norm(self.decoder.conv1) nn.utils.remove_weight_norm(self.decoder.conv2) for layer in self.decoder.block: nn.utils.remove_weight_norm(layer.conv_t1) nn.utils.remove_weight_norm(layer.res_unit1.conv1) nn.utils.remove_weight_norm(layer.res_unit1.conv2) nn.utils.remove_weight_norm(layer.res_unit2.conv1) nn.utils.remove_weight_norm(layer.res_unit2.conv2) nn.utils.remove_weight_norm(layer.res_unit3.conv1) nn.utils.remove_weight_norm(layer.res_unit3.conv2)
class_definition
20,035
23,085
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py
null
8,333
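`apply_weight_norm` above relies on a small compatibility shim: newer PyTorch releases expose weight normalization under `nn.utils.parametrizations`, older ones only under `nn.utils`. A minimal sketch of the same fallback on a standalone layer:

```python
# A minimal sketch of the weight-norm fallback used in `apply_weight_norm`, on a standalone layer.
import torch.nn as nn

conv = nn.Conv1d(1, 8, kernel_size=7, padding=3)

weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
    # Recent PyTorch versions expose the parametrization-based implementation instead.
    weight_norm = nn.utils.parametrizations.weight_norm

conv = weight_norm(conv)  # reparametrizes `weight` as a magnitude times a direction
print([name for name, _ in conv.named_parameters()])
```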
class DacModel(DacPreTrainedModel): def __init__(self, config: DacConfig): super().__init__(config) self.config = config self.encoder = DacEncoder(config) self.decoder = DacDecoder(config) self.quantizer = DacResidualVectorQuantize(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2**self.bits_per_codebook != self.config.codebook_size: raise ValueError("The codebook_size must be a power of 2.") # Initialize weights and apply final processing self.post_init() @replace_return_docstrings(output_type=DacEncoderOutput, config_class=_CONFIG_FOR_DOC) def encode( self, input_values: torch.Tensor, n_quantizers: int = None, return_dict: Optional[bool] = None, ): """ Encode given audio data and return quantized latent codes Args: input_values (`torch.Tensor of shape `(batch_size, 1, time_steps)`): Input audio data to encode, n_quantizers (int, *optional*): Number of quantizers to use. If None, all quantizers are used. Default is None. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: """ return_dict = return_dict if return_dict is not None else self.config.return_dict quantized_representation = self.encoder(input_values) quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss = self.quantizer( quantized_representation, n_quantizers ) loss = self.config.commitment_loss_weight * commitment_loss + self.config.codebook_loss_weight * codebook_loss if not return_dict: return (loss, quantized_representation, audio_codes, projected_latents) return DacEncoderOutput(loss, quantized_representation, audio_codes, projected_latents) @replace_return_docstrings(output_type=DacDecoderOutput, config_class=_CONFIG_FOR_DOC) def decode( self, quantized_representation: Optional[torch.Tensor] = None, audio_codes: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ): """Decode given latent codes and return audio data Args: quantized_representation (torch.Tensor of shape `(batch_size, dimension, time_steps)`, *optional*): Quantized continuous representation of input. audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*): The codebook indices for each codebook, representing the quantized discrete representation of the input. This parameter should be provided if you want to decode directly from the audio codes (it will overwrite quantized_representation). return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
Returns: """ if quantized_representation is None and audio_codes is None: raise ValueError("Either `quantized_representation` or `audio_codes` must be provided.") return_dict = return_dict if return_dict is not None else self.config.return_dict if audio_codes is not None: quantized_representation = self.quantizer.from_codes(audio_codes)[0] audio_values = self.decoder(quantized_representation).squeeze(1) if not return_dict: return (audio_values,) return DacDecoderOutput(audio_values) @add_start_docstrings_to_model_forward(DAC_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DacOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: torch.Tensor, n_quantizers: int = None, return_dict: Optional[bool] = None, ): """ Returns: Examples: ```python >>> from datasets import load_dataset, Audio >>> from transformers import DacModel, AutoProcessor >>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> model = DacModel.from_pretrained("descript/dac_16khz") >>> processor = AutoProcessor.from_pretrained("descript/dac_16khz") >>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) >>> audio_sample = librispeech_dummy[-1]["audio"]["array"] >>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt") >>> encoder_outputs = model.encode(inputs["input_values"]) >>> # Get the intermediate audio codes >>> audio_codes = encoder_outputs.audio_codes >>> # Reconstruct the audio from its quantized representation >>> audio_values = model.decode(encoder_outputs.quantized_representation) >>> # or the equivalent with a forward pass >>> audio_values = model(inputs["input_values"]).audio_values ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict length = input_values.shape[-1] loss, quantized_representation, audio_codes, projected_latents = self.encode( input_values, n_quantizers, return_dict=False ) audio_values = self.decode(quantized_representation, return_dict=False)[0][..., :length] if not return_dict: return (loss, audio_values, quantized_representation, audio_codes, projected_latents) return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents)
class_definition
24,478
30,265
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dac/modeling_dac.py
null
8,334
class EncodecOutput(ModelOutput):
    """
    Args:
        audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
            Discrete code embeddings computed using `model.encode`.
        audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Decoded audio values, obtained using the decoder part of Encodec.
    """

    audio_codes: torch.LongTensor = None
    audio_values: torch.FloatTensor = None
class_definition
1,216
1,705
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,335
class EncodecEncoderOutput(ModelOutput):
    """
    Args:
        audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
            Discrete code embeddings computed using `model.encode`.
        audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
            Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
    """

    audio_codes: torch.LongTensor = None
    audio_scales: torch.FloatTensor = None
class_definition
1,719
2,243
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,336
class EncodecDecoderOutput(ModelOutput):
    """
    Args:
        audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
            Decoded audio values, obtained using the decoder part of Encodec.
    """

    audio_values: torch.FloatTensor = None
class_definition
2,257
2,542
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,337
class EncodecConv1d(nn.Module): """Conv1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1 ): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) # warn user on unusual setup between dilation and stride if stride > 1 and dilation > 1: logger.warning( "EncodecConv1d has been initialized with stride > 1 and dilation > 1" f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})." ) self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if self.norm_type == "weight_norm": self.conv = weight_norm(self.conv) elif self.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] # Effective kernel size with dilations. kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer("stride", stride, persistent=False) self.register_buffer("kernel_size", kernel_size, persistent=False) self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False) def _get_extra_padding_for_conv1d( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """See `pad_for_conv1d`.""" length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0): """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens. """ length = hidden_states.shape[-1] padding_left, padding_right = paddings if not mode == "reflect": return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def forward(self, hidden_states): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if self.causal: # Left padding for causal hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: # Asymmetric padding required for odd strides padding_right = self.padding_total // 2 padding_left = self.padding_total - padding_right hidden_states = self._pad1d( hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode ) hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) return hidden_states
class_definition
2,545
6,689
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,338
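The `_get_extra_padding_for_conv1d` logic above pads the signal on the right just enough that the convolution sees a whole number of output frames without dropping samples. A small plain-Python sketch of the same arithmetic:

```python
# A plain-Python sketch of `_get_extra_padding_for_conv1d`: pad on the right just enough
# that the convolution produces a whole number of frames without dropping samples.
import math

def extra_padding_for_conv1d(length, kernel_size, stride, padding_total):
    n_frames = (length - kernel_size + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + kernel_size - padding_total
    return ideal_length - length

# e.g. kernel_size=8, stride=4, so padding_total = kernel_size - stride = 4:
for length in (100, 101, 102, 103, 104):
    print(length, "->", extra_padding_for_conv1d(length, kernel_size=8, stride=4, padding_total=4))
# 100 -> 0, 101 -> 3, 102 -> 2, 103 -> 1, 104 -> 0
```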
class EncodecConvTranspose1d(nn.Module): """ConvTranspose1d with asymmetric or causal padding and normalization.""" def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if config.norm_type == "weight_norm": self.conv = weight_norm(self.conv) elif config.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) if not (self.causal or self.trim_right_ratio == 1.0): raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions") def forward(self, hidden_states): kernel_size = self.conv.kernel_size[0] stride = self.conv.stride[0] padding_total = kernel_size - stride hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be # removed at the very end, when keeping only the right length for the output, # as removing it here would require also passing the length at the matching layer # in the encoder. if self.causal: # Trim the padding on the right according to the specified ratio # if trim_right_ratio = 1.0, trim everything from right padding_right = math.ceil(padding_total * self.trim_right_ratio) else: # Asymmetric padding required for odd strides padding_right = padding_total // 2 padding_left = padding_total - padding_right # unpad end = hidden_states.shape[-1] - padding_right hidden_states = hidden_states[..., padding_left:end] return hidden_states
class_definition
6,692
9,119
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,339
class EncodecLSTM(nn.Module):
    """
    LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout.
    """

    def __init__(self, config, dimension):
        super().__init__()
        self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers)

    def forward(self, hidden_states):
        hidden_states = hidden_states.permute(2, 0, 1)
        hidden_states = self.lstm(hidden_states)[0] + hidden_states
        hidden_states = hidden_states.permute(1, 2, 0)
        return hidden_states
class_definition
9,122
9,675
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,340
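The two `permute` calls above exist because `nn.LSTM` defaults to a `(time, batch, features)` layout while the surrounding convolutions use `(batch, channels, time)`. A minimal sketch of that round trip with the residual connection:

```python
# A minimal sketch of the layout round trip around nn.LSTM with the residual connection.
import torch

hidden_states = torch.randn(2, 32, 50)       # (batch, channels, time) - convolutional layout
lstm = torch.nn.LSTM(32, 32, num_layers=2)   # nn.LSTM defaults to (time, batch, features)

lstm_in = hidden_states.permute(2, 0, 1)     # -> (time, batch, channels)
lstm_out = lstm(lstm_in)[0] + lstm_in        # residual connection around the LSTM
back = lstm_out.permute(1, 2, 0)             # -> (batch, channels, time)
print(back.shape)                            # torch.Size([2, 32, 50])
```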
class EncodecResnetBlock(nn.Module): """ Residual block from SEANet model as used by EnCodec. """ def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError("Number of kernel sizes should match number of dilations") hidden = dim // config.compress block = [] for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states): residual = hidden_states for layer in self.block: hidden_states = layer(hidden_states) return self.shortcut(residual) + hidden_states
class_definition
9,678
10,891
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,341
class EncodecEncoder(nn.Module): """SEANet encoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 # Downsample to raw audio scale for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])] # Add downsampling layers model += [nn.ELU()] model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [EncodecLSTM(config, scaling * config.num_filters)] model += [nn.ELU()] model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states
class_definition
10,894
12,140
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,342
class EncodecDecoder(nn.Module): """SEANet decoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() scaling = int(2 ** len(config.upsampling_ratios)) model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)] model += [EncodecLSTM(config, scaling * config.num_filters)] # Upsample to raw audio scale for ratio in config.upsampling_ratios: current_scale = scaling * config.num_filters # Add upsampling layers model += [nn.ELU()] model += [ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio) ] # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))] scaling //= 2 # Add final layers model += [nn.ELU()] model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states
class_definition
12,143
13,486
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,343
class EncodecEuclideanCodebook(nn.Module): """Codebook with Euclidean distance.""" def __init__(self, config: EncodecConfig): super().__init__() embed = torch.zeros(config.codebook_size, config.codebook_dim) self.codebook_size = config.codebook_size self.register_buffer("inited", torch.Tensor([True])) self.register_buffer("cluster_size", torch.zeros(config.codebook_size)) self.register_buffer("embed", embed) self.register_buffer("embed_avg", embed.clone()) def quantize(self, hidden_states): embed = self.embed.t() scaled_states = hidden_states.pow(2).sum(1, keepdim=True) dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True)) embed_ind = dist.max(dim=-1).indices return embed_ind def encode(self, hidden_states): shape = hidden_states.shape # pre-process hidden_states = hidden_states.reshape((-1, shape[-1])) # quantize embed_ind = self.quantize(hidden_states) # post-process embed_ind = embed_ind.view(*shape[:-1]) return embed_ind def decode(self, embed_ind): quantize = nn.functional.embedding(embed_ind, self.embed) return quantize
class_definition
13,489
14,766
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,344
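`quantize` above finds the nearest codebook entry without materialising pairwise differences, by expanding the squared Euclidean distance and taking the argmax of its negation. A small sketch with random tensors that checks the trick against `torch.cdist`:

```python
# A minimal sketch of the expanded-distance argmin used in `quantize`, checked against torch.cdist.
import torch

codebook = torch.randn(1024, 128)            # (codebook_size, codebook_dim)
hidden_states = torch.randn(10, 128)         # flattened frames to quantize

# ||x - e||^2 = ||x||^2 - 2 x.e + ||e||^2, so argmin distance == argmax of the negation
dist = -(
    hidden_states.pow(2).sum(1, keepdim=True)
    - 2 * hidden_states @ codebook.t()
    + codebook.pow(2).sum(1, keepdim=True).t()
)
indices = dist.max(dim=-1).indices

reference = torch.cdist(hidden_states, codebook).argmin(dim=-1)
print((indices == reference).float().mean())  # ~1.0, up to floating-point ties
```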
class EncodecVectorQuantization(nn.Module):
    """
    Vector quantization implementation. Currently supports only euclidean distance.
    """

    def __init__(self, config: EncodecConfig):
        super().__init__()
        self.codebook = EncodecEuclideanCodebook(config)

    def encode(self, hidden_states):
        hidden_states = hidden_states.permute(0, 2, 1)
        embed_in = self.codebook.encode(hidden_states)
        return embed_in

    def decode(self, embed_ind):
        quantize = self.codebook.decode(embed_ind)
        quantize = quantize.permute(0, 2, 1)
        return quantize
class_definition
14,769
15,370
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,345
class EncodecResidualVectorQuantizer(nn.Module): """Residual Vector Quantizer.""" def __init__(self, config: EncodecConfig): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.num_quantizers = config.num_quantizers self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)]) def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int: """Return num_quantizers based on specified target bandwidth.""" bw_per_q = math.log2(self.codebook_size) * self.frame_rate num_quantizers = self.num_quantizers if bandwidth is not None and bandwidth > 0.0: num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q))) return num_quantizers def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor: """ Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer. """ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth) residual = embeddings all_indices = [] for layer in self.layers[:num_quantizers]: indices = layer.encode(residual) quantized = layer.decode(indices) residual = residual - quantized all_indices.append(indices) out_indices = torch.stack(all_indices) return out_indices def decode(self, codes: torch.Tensor) -> torch.Tensor: """Decode the given codes to the quantized representation.""" quantized_out = torch.tensor(0.0, device=codes.device) for i, indices in enumerate(codes): layer = self.layers[i] quantized = layer.decode(indices) quantized_out = quantized_out + quantized return quantized_out
class_definition
15,373
17,395
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,346
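`get_num_quantizers_for_bandwidth` above converts a target bitrate into a codebook count: each quantizer costs `log2(codebook_size)` bits per frame. A plain-Python sketch using the 24 kHz defaults (codebook size 1024, 75 frames per second):

```python
# A plain-Python sketch of `get_num_quantizers_for_bandwidth`: each codebook costs
# log2(codebook_size) bits per frame, i.e. log2(codebook_size) * frame_rate bits per second.
import math

codebook_size = 1024
frame_rate = 75                                            # 24 kHz / (8 * 5 * 4 * 2) samples per frame
bw_per_quantizer = math.log2(codebook_size) * frame_rate   # 750 bits/s = 0.75 kbps

for bandwidth in (1.5, 3.0, 6.0, 12.0, 24.0):              # target bandwidths in kbps
    num_quantizers = max(1, math.floor(bandwidth * 1000 / bw_per_quantizer))
    print(f"{bandwidth} kbps -> {num_quantizers} quantizers")
# 1.5 -> 2, 3.0 -> 4, 6.0 -> 8, 12.0 -> 16, 24.0 -> 32
```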
class EncodecPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = EncodecConfig base_model_prefix = "encodec" main_input_name = "input_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LSTM): for name, param in module.named_parameters(): if "weight" in name: nn.init.xavier_uniform_(param) elif "bias" in name: nn.init.constant_(param, 0.0)
class_definition
17,398
18,918
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,347
class EncodecModel(EncodecPreTrainedModel): def __init__(self, config: EncodecConfig): super().__init__(config) self.config = config self.encoder = EncodecEncoder(config) self.decoder = EncodecDecoder(config) self.quantizer = EncodecResidualVectorQuantizer(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2**self.bits_per_codebook != self.config.codebook_size: raise ValueError("The codebook_size must be a power of 2.") # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _encode_frame( self, input_values: torch.Tensor, bandwidth: float, padding_mask: int ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first normalized. The padding mask is required to compute the correct scale. """ length = input_values.shape[-1] duration = length / self.config.sampling_rate if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s: raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}") scale = None if self.config.normalize: # if the padding is non zero input_values = input_values * padding_mask.unsqueeze(1) mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1] scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8 input_values = input_values / scale embeddings = self.encoder(input_values) codes = self.quantizer.encode(embeddings, bandwidth) codes = codes.transpose(0, 1) return codes, scale def encode( self, input_values: torch.Tensor, padding_mask: torch.Tensor = None, bandwidth: Optional[float] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]: """ Encodes the input audio waveform into discrete codes. Args: input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Float values of the input audio waveform. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as bandwidth == 6.0 Returns: A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling factors for each chunk when `normalize` is True. Each frames is a tuple `(codebook, scale)`, with `codebook` of shape `[batch_size, num_codebooks, frames]`. """ return_dict = return_dict if return_dict is not None else self.config.return_dict if bandwidth is None: bandwidth = self.config.target_bandwidths[0] if bandwidth not in self.config.target_bandwidths: raise ValueError( f"This model doesn't support the bandwidth {bandwidth}. " f"Select one of {self.config.target_bandwidths}." 
) _, channels, input_length = input_values.shape if channels < 1 or channels > 2: raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}") chunk_length = self.config.chunk_length if chunk_length is None: chunk_length = input_length stride = input_length else: stride = self.config.chunk_stride if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() encoded_frames = [] scales = [] step = chunk_length - stride if (input_length % stride) - step != 0: raise ValueError( "The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly." ) for offset in range(0, input_length - step, stride): mask = padding_mask[..., offset : offset + chunk_length].bool() frame = input_values[:, :, offset : offset + chunk_length] encoded_frame, scale = self._encode_frame(frame, bandwidth, mask) encoded_frames.append(encoded_frame) scales.append(scale) encoded_frames = torch.stack(encoded_frames) if not return_dict: return (encoded_frames, scales) return EncodecEncoderOutput(encoded_frames, scales) @staticmethod def _linear_overlap_add(frames: List[torch.Tensor], stride: int): # Generic overlap add, with linear fade-in/fade-out, supporting complex scenario # e.g., more than 2 frames per position. # The core idea is to use a weight function that is a triangle, # with a maximum value at the middle of the chunk. # We use this weighting when summing the frames, and divide by the sum of weights # for each positions at the end. Thus: # - if a frame is the only one to cover a position, the weighting is a no-op. # - if 2 frames cover a position: # ... ... # / \/ \ # / /\ \ # S T , i.e. S offset of second frame starts, T end of first frame. # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset. # After the final normalization, the weight of the second frame at position `t` is # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want. # # - if more than 2 frames overlap at a given point, we hope that by induction # something sensible happens. if len(frames) == 0: raise ValueError("`frames` cannot be an empty list.") device = frames[0].device dtype = frames[0].dtype shape = frames[0].shape[:-1] total_size = stride * (len(frames) - 1) + frames[-1].shape[-1] frame_length = frames[0].shape[-1] time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1] weight = 0.5 - (time_vec - 0.5).abs() sum_weight = torch.zeros(total_size, device=device, dtype=dtype) out = torch.zeros(*shape, total_size, device=device, dtype=dtype) offset: int = 0 for frame in frames: frame_length = frame.shape[-1] out[..., offset : offset + frame_length] += weight[:frame_length] * frame sum_weight[offset : offset + frame_length] += weight[:frame_length] offset += stride if sum_weight.min() == 0: raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}`") return out / sum_weight def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor: codes = codes.transpose(0, 1) embeddings = self.quantizer.decode(codes) outputs = self.decoder(embeddings) if scale is not None: outputs = outputs * scale.view(-1, 1, 1) return outputs def decode( self, audio_codes: torch.Tensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]: """ Decodes the given frames into an output audio waveform. 
Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be trimmed. Args: audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discret code embeddings computed using `model.encode`. audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*): Scaling factor for each `audio_codes` input. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ return_dict = return_dict if return_dict is not None else self.config.return_dict chunk_length = self.config.chunk_length if chunk_length is None: if len(audio_codes) != 1: raise ValueError(f"Expected one frame, got {len(audio_codes)}") audio_values = self._decode_frame(audio_codes[0], audio_scales[0]) else: decoded_frames = [] for frame, scale in zip(audio_codes, audio_scales): frames = self._decode_frame(frame, scale) decoded_frames.append(frames) audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1) # truncate based on padding mask if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., : padding_mask.shape[-1]] if not return_dict: return (audio_values,) return EncodecDecoderOutput(audio_values) @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, bandwidth: Optional[float] = None, audio_codes: Optional[torch.Tensor] = None, audio_scales: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]: r""" Returns: Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, EncodecModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model_id = "facebook/encodec_24khz" >>> model = EncodecModel.from_pretrained(model_id) >>> processor = AutoProcessor.from_pretrained(model_id) >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_codes = outputs.audio_codes >>> audio_values = outputs.audio_values ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() if audio_codes is not None and audio_scales is None: raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`") if audio_scales is not None and audio_codes is None: raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`") if audio_scales is None and audio_codes is None: audio_codes, audio_scales = self.encode(input_values, padding_mask, bandwidth, False) audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0] if not return_dict: return (audio_codes, audio_values) return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
class_definition
21,769
33,786
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/modeling_encodec.py
null
8,348
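`_linear_overlap_add` above stitches overlapping decoded chunks together with a triangular weight per frame and renormalises by the summed weights at every position. A minimal 1-D sketch of the same procedure on constant toy frames:

```python
# A minimal 1-D sketch of `_linear_overlap_add`: triangular per-frame weights, then
# normalisation by the summed weights at every position.
import torch

def linear_overlap_add(frames, stride):
    frame_length = frames[0].shape[-1]
    total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
    time_vec = torch.linspace(0, 1, frame_length + 2)[1:-1]
    weight = 0.5 - (time_vec - 0.5).abs()            # triangle peaking mid-frame
    out = torch.zeros(total_size)
    sum_weight = torch.zeros(total_size)
    for i, frame in enumerate(frames):
        offset = i * stride
        out[offset : offset + frame_length] += weight * frame
        sum_weight[offset : offset + frame_length] += weight
    return out / sum_weight

# Two constant frames overlapping by half reconstruct a constant signal of length 12.
print(linear_overlap_add([torch.ones(8), torch.ones(8)], stride=4))
```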
class EncodecConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate a Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`): The range of diffent bandwiths the model can encode audio with. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. normalize (`bool`, *optional*, defaults to `False`): Whether the audio shall be normalized when passed. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formulae : `int((1.0 - self.overlap) * self.chunk_length)`. hidden_size (`int`, *optional*, defaults to 128): Intermediate representation dimension. num_filters (`int`, *optional*, defaults to 32): Number of convolution kernels of first `EncodecConv1d` down sampling layer. num_residual_layers (`int`, *optional*, defaults to 1): Number of residual layers. upsampling_ratios (`Sequence[int]` , *optional*, defaults to `[8, 5, 4, 2]`): Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here that must match the decoder order. norm_type (`str`, *optional*, defaults to `"weight_norm"`): Normalization method. Should be in `["weight_norm", "time_group_norm"]` kernel_size (`int`, *optional*, defaults to 7): Kernel size for the initial convolution. last_kernel_size (`int`, *optional*, defaults to 7): Kernel size for the last convolution layer. residual_kernel_size (`int`, *optional*, defaults to 3): Kernel size for the residual layers. dilation_growth_rate (`int`, *optional*, defaults to 2): How much to increase the dilation with each layer. use_causal_conv (`bool`, *optional*, defaults to `True`): Whether to use fully causal convolution. pad_mode (`str`, *optional*, defaults to `"reflect"`): Padding mode for the convolutions. compress (`int`, *optional*, defaults to 2): Reduced dimensionality in residual branches (from Demucs v3). num_lstm_layers (`int`, *optional*, defaults to 2): Number of LSTM layers at the end of the encoder. trim_right_ratio (`float`, *optional*, defaults to 1.0): Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If equal to 1.0, it means that all the trimming is done at the right. codebook_size (`int`, *optional*, defaults to 1024): Number of discret codes that make up VQVAE. codebook_dim (`int`, *optional*): Dimension of the codebook vectors. If not defined, uses `hidden_size`. use_conv_shortcut (`bool`, *optional*, defaults to `True`): Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. 
If False, an identity function will be used, giving a generic residual connection. Example: ```python >>> from transformers import EncodecModel, EncodecConfig >>> # Initializing a "facebook/encodec_24khz" style configuration >>> configuration = EncodecConfig() >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration >>> model = EncodecModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "encodec" def __init__( self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ): self.target_bandwidths = target_bandwidths self.sampling_rate = sampling_rate self.audio_channels = audio_channels self.normalize = normalize self.chunk_length_s = chunk_length_s self.overlap = overlap self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.norm_type = norm_type self.kernel_size = kernel_size self.last_kernel_size = last_kernel_size self.residual_kernel_size = residual_kernel_size self.dilation_growth_rate = dilation_growth_rate self.use_causal_conv = use_causal_conv self.pad_mode = pad_mode self.compress = compress self.num_lstm_layers = num_lstm_layers self.trim_right_ratio = trim_right_ratio self.codebook_size = codebook_size self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size self.use_conv_shortcut = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**kwargs) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) @property def frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
class_definition
886
8,494
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/configuration_encodec.py
null
8,349
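The derived properties at the end of the config are simple arithmetic. A plain-Python sketch with the default 24 kHz values; the `chunk_length_s` and `overlap` numbers are illustrative, since both default to `None`:

```python
# A plain-Python sketch of the derived properties for the default 24 kHz configuration.
# `chunk_length_s` and `overlap` default to None, so the values below are illustrative.
import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
chunk_length_s, overlap = 1.0, 0.01

hop_length = math.prod(upsampling_ratios)                   # 320 samples per latent frame
frame_rate = math.ceil(sampling_rate / hop_length)          # 75 frames per second
chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 23760 samples between chunks

print(hop_length, frame_rate, chunk_length, chunk_stride)
```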
class EncodecFeatureExtractor(SequenceFeatureExtractor): r""" Constructs an EnCodec feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Instantiating a feature extractor with the defaults will yield a similar configuration to that of the [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formulae : `int((1.0 - self.overlap) * self.chunk_length)`. """ model_input_names = ["input_values", "padding_mask"] def __init__( self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.chunk_length_s = chunk_length_s self.overlap = overlap # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) def __call__( self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") padded_inputs = None input_values = BatchFeature({"input_values": raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: max_length = min(array.shape[0] for array in raw_audio) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max(array.shape[0] for array in raw_audio) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = "max_length" else: padded_inputs = input_values # normal padding on batch if padded_inputs is None: padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, ) if padding: padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] 
input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
class_definition
959
9,872
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/encodec/feature_extraction_encodec.py
null
8,350
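When chunking is enabled, the feature extractor rounds the padded length up so every example in the batch contains a whole number of chunks. A plain-Python sketch of that computation; the chunk sizes are illustrative:

```python
# A plain-Python sketch of the chunk-aware padding length used by the feature extractor.
import math

chunk_length, chunk_stride = 24_000, 23_760   # 1 s chunks with a small overlap (illustrative)
longest_example = 50_000                      # samples in the longest batch entry

nb_step = math.ceil(longest_example / chunk_stride)
max_length = (nb_step - 1) * chunk_stride + chunk_length
print(nb_step, max_length)                    # 3 chunks -> pad every example to 71520 samples
```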
class BridgeTowerVisionConfig(PretrainedConfig): r""" This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a configuration with the defaults will yield a similar configuration to that of the bridgetower-base [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in visual encoder model. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. image_size (`int`, *optional*, defaults to 288): The size (resolution) of each image. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. stop_gradient (`bool`, *optional*, defaults to `False`): Whether to stop gradient for training. share_layernorm (`bool`, *optional*, defaults to `True`): Whether LayerNorm layers are shared. remove_last_layer (`bool`, *optional*, defaults to `False`): Whether to remove the last layer from the vision encoder. Example: ```python >>> from transformers import BridgeTowerVisionConfig >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the vision model >>> configuration = BridgeTowerVisionConfig() >>> # Accessing the configuration >>> configuration ```""" model_type = "bridgetower_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.stop_gradient = stop_gradient self.share_layernorm = share_layernorm self.remove_last_layer = remove_last_layer
class_definition
857
3,806
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/configuration_bridgetower.py
null
8,351
class BridgeTowerTextConfig(PretrainedConfig): r""" This is the configuration class to store the text configuration of a [`BridgeTowerModel`]. The default values here are copied from RoBERTa. Instantiating a configuration with the defaults will yield a similar configuration to that of the bridgetower-base [BridegTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the text part of the model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BridgeTowerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 514): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids`. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
Example: ```python >>> from transformers import BridgeTowerTextConfig >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the text model >>> configuration = BridgeTowerTextConfig() >>> # Accessing the configuration >>> configuration ```""" model_type = "bridgetower_text_model" base_config_key = "text_config" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_factor = initializer_factor self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id
class_definition
3,809
9,382
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/configuration_bridgetower.py
null
8,352
class BridgeTowerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BridgeTowerModel`]. It is used to instantiate a BridgeTower model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the bridgetower-base [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: share_cross_modal_transformer_layers (`bool`, *optional*, defaults to `True`): Whether cross modal transformer layers are shared. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. share_link_tower_layers (`bool`, *optional*, defaults to `False`): Whether the bride/link tower layers are shared. link_tower_type (`str`, *optional*, defaults to `"add"`): Type of the bridge/link layer. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie input and output embeddings. init_layernorm_from_vision_encoder (`bool`, *optional*, defaults to `False`): Whether to init LayerNorm from the vision encoder. text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BridgeTowerTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BridgeTowerVisionConfig`]. Example: ```python >>> from transformers import BridgeTowerModel, BridgeTowerConfig >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration >>> configuration = BridgeTowerConfig() >>> # Initializing a model from the BridgeTower/bridgetower-base style configuration >>> model = BridgeTowerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "bridgetower" sub_configs = {"text_config": BridgeTowerTextConfig, "vision_config": BridgeTowerVisionConfig} def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ): # TODO: remove this once the Hub files are updated. 
_ = kwargs.pop("text_config_dict", None) _ = kwargs.pop("vision_config_dict", None) super().__init__(**kwargs) self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers self.hidden_act = hidden_act self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.share_link_tower_layers = share_link_tower_layers self.link_tower_type = link_tower_type self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.tie_word_embeddings = tie_word_embeddings self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.") self.text_config = BridgeTowerTextConfig(**text_config) self.vision_config = BridgeTowerVisionConfig(**vision_config) @classmethod def from_text_vision_configs( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ): r""" Instantiate a [`BridgeTowerConfig`] (or a derived class) from BridgeTower text model configuration. Returns: [`BridgeTowerConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
class_definition
9,385
14,789
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/configuration_bridgetower.py
null
8,353
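A minimal usage sketch of how the text and vision sub-configurations compose through `from_text_vision_configs` (it assumes the `transformers` package that exports these classes is installed; `"scaled_add"` is just one of the documented `link_tower_type` values):

```python
# Minimal sketch: composing a BridgeTowerConfig from explicit sub-configs.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig()      # RoBERTa-style text tower defaults
vision_config = BridgeTowerVisionConfig()  # ViT-style vision tower defaults

config = BridgeTowerConfig.from_text_vision_configs(
    text_config, vision_config, link_tower_type="scaled_add"
)
print(config.link_tower_type)              # "scaled_add"
print(type(config.text_config).__name__)   # "BridgeTowerTextConfig"
```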
class BridgeTowerProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_length": False, "verbose": True, }, "images_kwargs": { "do_normalize": True, "do_center_crop": True, }, }
class_definition
923
1,457
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/processing_bridgetower.py
null
8,354
class BridgeTowerProcessor(ProcessorMixin): r""" Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single processor. [`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and [`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and [`~BridgeTowerProcessor.decode`] for more information. Args: image_processor (`BridgeTowerImageProcessor`): An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input. tokenizer (`RobertaTokenizerFast`): An instance of [`RobertaTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "BridgeTowerImageProcessor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[BridgeTowerProcessorKwargs], ) -> BatchEncoding: """ This method uses [`BridgeTowerImageProcessor.__call__`] to prepare image(s) for the model, and [`RobertaTokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. """ output_kwargs = self._merge_kwargs( BridgeTowerProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) encoding = self.tokenizer(text=text, **output_kwargs["text_kwargs"]) # add pixel_values + pixel_mask encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"]) encoding.update(encoding_image_processor) return encoding def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_definition
1,460
4,399
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/processing_bridgetower.py
null
8,355
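A short sketch of the processor call path described above. It assumes network access to the `BridgeTower/bridgetower-base` checkpoint referenced elsewhere in this file; the caption string is arbitrary and the image URL is the COCO sample used in the model docstring:

```python
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text kwargs are routed to the Roberta tokenizer, image kwargs to the image
# processor; the two encodings are merged into a single BatchEncoding.
inputs = processor(image, "two cats lying on a couch", return_tensors="pt")
print(sorted(inputs.keys()))  # typically input_ids, attention_mask, pixel_values, pixel_mask
```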
class BridgeTowerModelOutput(ModelOutput): """ Output type of [`BridgeTowerModel`]. Args: text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`): Sequence of hidden-states at the text output of the last layer of the model. image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`): Sequence of hidden-states at the image output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`): Concatenation of last layer hidden-state of the first token of the text and image sequence (classification token), respectively, after further processing through layers used for auxiliary pretraining tasks. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ text_features: torch.FloatTensor = None image_features: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
6,005
8,000
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,356
class BridgeTowerContrastiveOutput(ModelOutput): """ Output type of [`BridgeTowerForContrastiveLearning`] Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Image-text contrastive loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). text_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`): The text embeddings obtained by applying the projection layer to the pooler_output. image_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. cross_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`): The text-image cross-modal embeddings obtained by applying the projection layer to the pooler_output. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None text_embeds: Optional[Tuple[torch.FloatTensor]] = None image_embeds: Optional[Tuple[torch.FloatTensor]] = None cross_embeds: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
8,014
10,336
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,357
class BridgeTowerResidualAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64) self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = nn.ModuleDict( OrderedDict( [ ("c_fc", nn.Linear(config.hidden_size, config.hidden_size * 4)), ("gelu", QuickGELUActivation()), ("c_proj", nn.Linear(config.hidden_size * 4, config.hidden_size)), ] ) ) self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attn_mask = None def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor): if attention_mask is not None: attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device) self.attn_mask = ( self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device) if self.attn_mask is not None else None ) return self.attn( hidden_state, hidden_state, hidden_state, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=attention_mask, )[0] def forward(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor = None): residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask) hidden_state = self.ln_2(residual_state) for _, layer in self.mlp.items(): hidden_state = layer(hidden_state) hidden_state = residual_state + hidden_state return hidden_state
class_definition
10,339
12,104
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,358
class BridgeTowerTransformer(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.num_hidden_layers = config.num_hidden_layers if config.remove_last_layer: self.resblocks = nn.ModuleList( [BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers - 1)] ) else: self.resblocks = nn.ModuleList( [BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers)] ) self.stop_gradient = config.stop_gradient def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): hidden_states = [] for block in self.resblocks: hidden_state = block(hidden_state, attention_mask) if self.stop_gradient: hidden_states.append(hidden_state.detach()) else: hidden_states.append(hidden_state) return hidden_states
class_definition
12,107
13,138
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,359
class BridgeTowerVisionEmbeddings(nn.Module): def __init__(self, config: BridgeTowerVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size})." ) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class_definition
13,238
17,078
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,360
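A standalone sketch of the position-embedding interpolation performed above, with illustrative sizes: the patch grid of a 224-pixel checkpoint is resized bicubically for a 288-pixel input while the class-token position is left untouched. Only `torch` is assumed:

```python
import torch
import torch.nn.functional as F

dim, patch_size = 768, 16
old_size, new_size = 224, 288                          # square images assumed
old_grid, new_grid = old_size // patch_size, new_size // patch_size

pos_embed = torch.randn(1, old_grid * old_grid + 1, dim)   # [CLS] position + patch positions
cls_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]

# Reshape the flat patch positions back into a 2D grid, resize, then flatten again.
patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos = F.interpolate(patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)

new_pos_embed = torch.cat((cls_pos, patch_pos), dim=1)
print(new_pos_embed.shape)   # torch.Size([1, 18 * 18 + 1, 768]) -> [1, 325, 768]
```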
class BridgeTowerVisionTransformer(nn.Module): def __init__(self, config): super().__init__() self.embeddings = BridgeTowerVisionEmbeddings(config) self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.transformer = BridgeTowerTransformer(config) self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.share_layernorm = config.share_layernorm if not config.share_layernorm: self.ln_separate = nn.ModuleList( [nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)] ) def forward( self, pixel_values: torch.Tensor, attention_mask, interpolate_pos_encoding: bool = False, ): hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding) hidden_states = self.ln_pre(hidden_states) # NLD -> LND hidden_states = hidden_states.permute(1, 0, 2) hidden_states = self.transformer(hidden_states, attention_mask) # shape = [num_hidden_layers, hidden_size, *, grid ** 2] hidden_states = torch.stack(hidden_states, dim=0) # shape = [num_hidden_layers, *, hidden_size, grid ** 2] hidden_states = hidden_states.permute(0, 2, 1, 3) if self.share_layernorm: hidden_states = self.ln_post(hidden_states) else: hidden_states_stack = [] for hidden_states, ln in zip(hidden_states, self.ln_separate): hidden_states = ln(hidden_states) hidden_states_stack.append(hidden_states) # shape = [num_hidden_layers, *, hidden_size, grid ** 2] hidden_states = torch.stack(hidden_states_stack, dim=0) return hidden_states def forward_pre( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False, ): hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.ln_pre(hidden_states) # NLD -> LND hidden_states = hidden_states.permute(1, 0, 2) return hidden_states def forward_post(self, hidden_state: torch.Tensor): visual_output_post = hidden_state.permute(1, 0, 2) visual_output_post = self.ln_post(visual_output_post) return visual_output_post
class_definition
17,081
19,506
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,361
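A shape-tracking sketch of the `NLD -> LND` permute and the per-layer stacking done in the forward pass above; the sizes are illustrative, and the list of identical tensors merely stands in for the transformer's per-block outputs:

```python
import torch

batch, seq_len, hidden, num_layers = 2, 197, 768, 12             # illustrative sizes
hidden_states = torch.randn(batch, seq_len, hidden)               # NLD, after ln_pre
hidden_states = hidden_states.permute(1, 0, 2)                    # LND, as nn.MultiheadAttention expects
per_layer = [hidden_states.clone() for _ in range(num_layers)]    # one tensor per residual block
stacked = torch.stack(per_layer, dim=0)                           # (num_layers, seq_len, batch, hidden)
stacked = stacked.permute(0, 2, 1, 3)                             # (num_layers, batch, seq_len, hidden)
print(stacked.shape)                                              # torch.Size([12, 2, 197, 768])
```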
class BridgeTowerLinkTower(nn.Module): def __init__(self, config): super().__init__() self.link_tower_type = config.link_tower_type self.hidden_size = config.hidden_size if config.link_tower_type in ["add", "scaled_add", "interpolate"]: if config.link_tower_type == "scaled_add": self.scaled_factor = nn.Parameter(torch.tensor(1.0)) elif config.link_tower_type == "interpolate": self.beta = nn.Parameter(torch.tensor(0.5)) self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) else: raise NotImplementedError(f"link_tower_type {config.link_tower_type} is not implemented") def forward(self, hidden_states, cross_modal_hidden_states, attention_mask): if self.link_tower_type == "add": return self.LayerNorm(hidden_states + cross_modal_hidden_states) elif self.link_tower_type == "scaled_add": return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states) elif self.link_tower_type == "interpolate": return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta) else: raise NotImplementedError(f"link_tower_type {self.link_tower_type} is not implemented")
class_definition
19,509
20,849
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,362
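A standalone sketch of the three fusion rules implemented by the link tower above; the closing LayerNorm is omitted so the arithmetic stays visible, and the scalar values stand in for the learnable parameters:

```python
import torch

hidden = torch.randn(2, 5, 8)        # uni-modal tower output
cross = torch.randn(2, 5, 8)         # previous cross-modal layer output
scaled_factor = torch.tensor(1.0)    # nn.Parameter in the module above
beta = torch.tensor(0.5)             # nn.Parameter in the module above

fused_add = hidden + cross
fused_scaled_add = hidden * scaled_factor + cross
fused_interpolate = hidden * (1 - beta) + cross * beta
print(fused_add.shape, fused_scaled_add.shape, fused_interpolate.shape)
```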
class BridgeTowerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
20,943
21,556
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,363
class BridgeTowerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
21,652
22,224
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,364
class BridgeTowerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
22,314
22,929
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,365
class BridgeTowerPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class_definition
23,019
23,585
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,366
class BridgeTowerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BridgeTowerModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs
class_definition
23,694
31,050
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,367
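A standalone sketch of the relative-position lookup used above when `position_embedding_type` is `"relative_key"`; the sizes are illustrative:

```python
import torch
import torch.nn as nn

max_position_embeddings, attention_head_size = 514, 64
distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, attention_head_size)

query_length = key_length = 6
position_ids_l = torch.arange(query_length).view(-1, 1)
position_ids_r = torch.arange(key_length).view(1, -1)
distance = position_ids_l - position_ids_r            # (6, 6), values in [-5, 5]

# Shift into [0, 2 * max_position_embeddings - 2] before the embedding lookup.
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)
print(positional_embedding.shape)                     # torch.Size([6, 6, 64])
# These per-pair embeddings are contracted against the queries ("bhld,lrd->bhlr")
# and added to the raw query-key attention scores.
```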
class BridgeTowerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BRIDGE_TOWER_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = BridgeTowerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
31,245
33,389
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,368
class BridgeTowerBertCrossLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BridgeTowerAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention self.crossattention = BridgeTowerAttention(config) self.intermediate = BridgeTowerIntermediate(config) self.output = BridgeTowerOutput(config) def forward( self, hidden_states, encoder_hidden_states, attention_mask=None, head_mask=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=None, output_attentions=output_attentions, past_key_value=None, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache # add self attentions if we output attention weights outputs = self_attention_outputs[1:] cross_attention_outputs = self.crossattention( attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] # add cross attentions if we output attention weights outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class_definition
33,392
35,729
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,369
class BridgeTowerTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BridgeTowerAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BridgeTowerAttention(config, position_embedding_type="absolute") self.intermediate = BridgeTowerIntermediate(config) self.output = BridgeTowerOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class_definition
35,732
39,678
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,370
class BridgeTowerTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BridgeTowerTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, )
class_definition
39,785
43,597
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,371
class BridgeTowerTextEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape)
class_definition
43,707
47,895
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,372
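The embeddings above rely on `create_position_ids_from_input_ids`, which offsets positions by the padding index so padded tokens keep the padding position. A small worked sketch of that behavior (the token ids are arbitrary and `pad_token_id=1` mirrors the RoBERTa convention):

```python
import torch

pad_token_id = 1
input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # last two positions are padding

mask = input_ids.ne(pad_token_id).int()
position_ids = (torch.cumsum(mask, dim=1) * mask).long() + pad_token_id
print(position_ids)   # tensor([[2, 3, 4, 5, 1, 1]]) -- real tokens start at padding_idx + 1
```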
class BridgeTowerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BridgeTowerConfig base_model_prefix = "bridgetower" supports_gradient_checkpointing = False _no_split_modules = ["BridgeTowerSelfAttention", "BridgeTowerResidualAttention"] _skip_keys_device_placement = "past_key_values" def _init_weights(self, module): if isinstance(module, BridgeTowerVisionModel): proj_std = (module.visual.transformer.hidden_size**-0.5) * ( (2 * module.visual.transformer.num_hidden_layers) ** -0.5 ) attn_std = module.visual.transformer.hidden_size**-0.5 fc_std = (2 * module.visual.transformer.hidden_size) ** -0.5 for block in module.visual.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std * self.config.initializer_factor) nn.init.normal_(block.attn.out_proj.weight, std=proj_std * self.config.initializer_factor) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std * self.config.initializer_factor) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std * self.config.initializer_factor) nn.init.normal_(module.visual.embeddings.class_embedding, std=attn_std * self.config.initializer_factor) nn.init.normal_( module.visual.embeddings.position_embedding.weight, std=attn_std * self.config.initializer_factor ) elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.05 * self.config.initializer_factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
class_definition
48,665
50,662
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,373
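A worked example of the vision-tower initialization scales computed in `_init_weights` above; the hidden size and depth are illustrative values rather than values read from a checkpoint:

```python
hidden_size, num_hidden_layers, initializer_factor = 768, 12, 1.0   # illustrative

attn_std = hidden_size ** -0.5                                        # ~0.0361
proj_std = (hidden_size ** -0.5) * ((2 * num_hidden_layers) ** -0.5)  # ~0.0074
fc_std = (2 * hidden_size) ** -0.5                                    # ~0.0255

print(attn_std * initializer_factor, proj_std * initializer_factor, fc_std * initializer_factor)
```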
class BridgeTowerVisionModel(BridgeTowerPreTrainedModel): config_class = BridgeTowerVisionConfig def __init__(self, config): super().__init__(config) self.visual = BridgeTowerVisionTransformer(config) @property def dtype(self): return self.visual.embeddings.patch_embedding.weight.dtype def forward(self, image, image_mask=None, interpolate_pos_encoding=False): return self.visual(image.type(self.dtype), image_mask, interpolate_pos_encoding)
class_definition
50,665
51,162
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,374
class BridgeTowerTextModel(BridgeTowerPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ config_class = BridgeTowerTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BridgeTowerTextEmbeddings(config) self.encoder = BridgeTowerTextEncoder(config) self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) # Copied from transformers.models.clap.modeling_clap.ClapTextModel.forward def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, )
class_definition
51,165
60,214
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,375
class BridgeTowerModel(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config vision_config = config.vision_config text_config = config.text_config if config.share_cross_modal_transformer_layers: self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size) self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size) else: self.cross_modal_text_transform = nn.ModuleList( [nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)] ) self.cross_modal_image_transform = nn.ModuleList( [nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)] ) self.token_type_embeddings = nn.Embedding(2, config.hidden_size) self.vision_model = BridgeTowerVisionModel(vision_config) self.text_model = BridgeTowerTextModel(text_config) if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder: for ln in self.vision_model.visual.cross_modal_ln_separate: ln.weight.data = self.vision_model.visual.ln_post.weight.data ln.bias.data = self.vision_model.visual.ln_post.bias.data self.cross_modal_image_layers = nn.ModuleList( [BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)] ) self.cross_modal_text_layers = nn.ModuleList( [BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)] ) # Class token => Linear => Tanh self.cross_modal_image_pooler = BridgeTowerPooler(config) self.cross_modal_text_pooler = BridgeTowerPooler(config) # Initialize BridgeTower Components self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.share_link_tower_layers: self.cross_modal_text_link_tower = BridgeTowerLinkTower(config) self.cross_modal_image_link_tower = BridgeTowerLinkTower(config) else: self.cross_modal_text_link_tower = nn.ModuleList( [BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)] ) self.cross_modal_image_link_tower = nn.ModuleList( [BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)] ) self.post_init() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BridgeTowerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_embeds: Optional[torch.FloatTensor] = None, image_token_type_idx: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple[torch.Tensor], BridgeTowerModelOutput]: r""" output_hidden_states (`bool`, *optional*): If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and cross-modal components respectively. i.e. 
`(hidden_states_text, hidden_states_image, hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding modality. `hidden_states_txt/img` are lists of tensors corresponding to unimodal hidden states and `hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and `cross_modal_image_hidden_states` of each bridge layer. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels are currently not supported. Returns: Examples: ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerModel >>> from PIL import Image >>> import requests >>> # prepare image and text >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "hello world" >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base") >>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base") >>> inputs = processor(image, text, return_tensors="pt") >>> outputs = model(**inputs) >>> outputs.keys() odict_keys(['text_features', 'image_features', 'pooler_output']) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) all_hidden_states_text = () if output_hidden_states else None all_hidden_states_image = () if output_hidden_states else None all_hidden_states_cross = () if output_hidden_states else None all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if inputs_embeds is not None and input_ids is None: raise NotImplementedError( "BridgeTowerModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead."
) return_dict = return_dict if return_dict is not None else self.config.use_return_dict image_token_type_idx = image_token_type_idx if image_token_type_idx else 1 input_shape = input_ids.size() text_embeds = self.text_model.embeddings(input_ids=input_ids) if output_hidden_states: all_hidden_states_text += (text_embeds,) if attention_mask is None: attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device) extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to( input_ids.device ) # The split_index determines how many layers of the uni-modal encoder are applied before the cross-modal encoder split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1 # Run the first 'split_index' layers of the textual encoder for layer in self.text_model.encoder.layer[:split_index]: text_embeds = layer(text_embeds, extend_text_masks)[0] if output_hidden_states: all_hidden_states_text += (text_embeds,) if image_embeds is None: image_embeds = self.vision_model.visual.forward_pre( pixel_values.type(self.vision_model.dtype), interpolate_pos_encoding=interpolate_pos_encoding ) else: # Permute as BridgeTowerResidualAttention has batch_first=True image_embeds = image_embeds.permute(1, 0, 2) if output_hidden_states: all_hidden_states_image += (image_embeds,) # Run the first 'split_index' layers of the visual encoder for block in self.vision_model.visual.transformer.resblocks[:split_index]: image_embeds = block(image_embeds) if output_hidden_states: all_hidden_states_image += (image_embeds,) image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype)) # first layer is a special case because we don't have the output from the cross-encoder yet cross_modal_text = self.cross_modal_text_transform(text_embeds) text_token_type_embeddings = self.token_type_embeddings( torch.zeros(1, dtype=torch.long, device=input_ids.device) ).expand_as(cross_modal_text) cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings) image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln) image_token_type_embeddings = self.token_type_embeddings( torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device) ).expand_as(image_embeds_with_ln) image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln) pixel_mask = torch.ones( (cross_modal_image.size(0), cross_modal_image.size(1)), dtype=torch.long, device=input_ids.device, ) extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to( input_ids.device ) layer_outputs_text = self.cross_modal_text_layers[0]( cross_modal_text, cross_modal_image, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions, ) cross_text_features = layer_outputs_text[0] layer_outputs_image = self.cross_modal_image_layers[0]( cross_modal_image, cross_modal_text, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions, ) cross_image_features = layer_outputs_image[0] if output_hidden_states: all_hidden_states_cross += ((cross_text_features, cross_image_features),) if output_attentions: all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),) link_layer_index = 0 # Each of the top 6 layers of the visual and textual encoders ([split_index:]) is connected to each 
layer of # the cross-modal encoder via bridge layers, which brings bottom-up alignment and fusion to the cross-modal encoder. for i in range(split_index, len(self.text_model.encoder.layer)): text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0] image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type( self.vision_model.dtype ) image_embeds_with_ln = ( self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds)) + image_token_type_embeddings ) text_link_tower = self.cross_modal_text_link_tower[link_layer_index] image_link_tower = self.cross_modal_image_link_tower[link_layer_index] # Bridge layers for textual and visual encoders cross_text_features_ = text_link_tower( self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings, cross_text_features, extend_text_masks, ) cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks) # Cross-modal encoder via bridge layers of textual and visual encoders layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1]( cross_text_features_, cross_image_features_, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions, ) cross_text_features = layer_outputs_text[0] layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1]( cross_image_features_, cross_text_features_, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions, ) cross_image_features = layer_outputs_image[0] link_layer_index += 1 if output_hidden_states: all_hidden_states_text += (text_embeds,) all_hidden_states_image += (image_embeds,) all_hidden_states_cross += ((cross_text_features, cross_image_features),) if output_attentions: all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),) # Concatenate the cls token of the text and image features to get the final represtation text_features, image_features = cross_text_features, cross_image_features cls_features = self.get_cls_features(text_features, image_features) if output_hidden_states: all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross) if not return_dict: return tuple( v for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions] if v is not None ) return BridgeTowerModelOutput( text_features=text_features, image_features=image_features, pooler_output=cls_features, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def get_cls_features(self, text_features, image_features): cls_features_text = self.cross_modal_text_pooler(text_features) cls_features_image = self.cross_modal_image_pooler(image_features) return torch.cat([cls_features_text, cls_features_image], dim=-1)
class_definition
60,403
74,756
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,376
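The `all_hidden_states` bookkeeping in the record above returns a nested tuple rather than the usual flat one. A minimal sketch of unpacking it, reusing the checkpoint named in the docstring example and assuming network access to download it:

```python
import requests
import torch
from PIL import Image
from transformers import BridgeTowerModel, BridgeTowerProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")

inputs = processor(image, "hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# hidden_states is (hidden_states_text, hidden_states_image, hidden_states_cross_modal):
# the first two hold per-layer unimodal states, the third holds one
# (cross_modal_text, cross_modal_image) pair per cross-modal layer.
text_states, image_states, cross_states = outputs.hidden_states
print(len(text_states), len(image_states), len(cross_states))
print(cross_states[0][0].shape, cross_states[0][1].shape)
```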
class BridgeTowerPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class_definition
74,863
75,540
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,377
class BridgeTowerMLMHead(nn.Module): def __init__(self, config, weight=None): super().__init__() self.config = config self.transform = BridgeTowerPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.text_config.vocab_size)) if weight is not None: self.decoder.weight = weight def forward(self, x): mlm_score = self.transform(x) mlm_score = self.decoder(mlm_score) + self.bias return mlm_score
class_definition
75,543
76,139
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,378
class BridgeTowerITMHead(nn.Module): def __init__(self, hidden_size): super().__init__() self.fc = nn.Linear(hidden_size, 2) def forward(self, x): itm_score = self.fc(x) return itm_score
class_definition
76,142
76,369
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,379
class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel): _tied_weights_keys = ["mlm_score.decoder.weight"] def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.mlm_score = BridgeTowerMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.mlm_score.decoder def set_output_embeddings(self, new_embeddings): self.mlm_score.decoder = new_embeddings @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000360943.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> text = "a <mask> looking out of the window" >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # prepare inputs >>> encoding = processor(image, text, return_tensors="pt") >>> # forward pass >>> outputs = model(**encoding) >>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist()) >>> print(results) .a cat looking out of the window. ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) mlm_logits = self.mlm_score(outputs.text_features if return_dict else outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token labels = labels.to(mlm_logits.device) masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.text_config.vocab_size), labels.view(-1)) if not return_dict: output = tuple(mlm_logits) return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=mlm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
76,534
80,719
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,380
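A self-contained sketch of the `-100` label convention described in the docstring of the record above; the vocabulary size and tensors below are made up for illustration and are not tied to any BridgeTower checkpoint:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10                                  # illustrative only
mlm_logits = torch.randn(1, 4, vocab_size)       # (batch_size, seq_len, vocab_size)
labels = torch.tensor([[-100, 3, -100, 7]])      # only positions 1 and 3 contribute

loss_fct = CrossEntropyLoss()                    # ignore_index defaults to -100
masked_lm_loss = loss_fct(mlm_logits.view(-1, vocab_size), labels.view(-1))
print(masked_lm_loss)
```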
class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.itm_score = BridgeTowerITMHead(config.hidden_size * 2) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*): Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match. The pairs with 0 will be skipped for calculation. Returns: Examples: ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs.logits[0, 1].item() ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooler_output = outputs.pooler_output if return_dict else outputs[2] logits = self.itm_score(pooler_output) itm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) itm_loss = loss_fct(logits, labels) if not return_dict: output = tuple(logits) return ((itm_loss,) + output) if itm_loss is not None else output return SequenceClassifierOutput( loss=itm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
80,962
84,713
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,381
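The ITM head in the record above emits two logits per image-text pair (index 0: no match, index 1: match), which is why the docstring example reads `outputs.logits[0, 1]`. A tiny sketch with made-up logits showing how to turn them into a match probability:

```python
import torch

logits = torch.tensor([[-1.2, 2.3]])        # illustrative (batch_size, 2) ITM logits
match_prob = logits.softmax(dim=-1)[:, 1]   # probability that the pair matches
print(match_prob)
```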
class BridgeTowerContrastiveHead(nn.Module): def __init__(self, hidden_size, embed_size): super().__init__() self.fc = nn.Linear(hidden_size, embed_size) def forward(self, x): x = self.fc(x) return x
class_definition
84,716
84,956
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,382
class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.itc_text_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size) self.itc_image_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size) self.itc_cross_modal_head = BridgeTowerContrastiveHead(config.hidden_size * 2, config.contrastive_hidden_size) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BridgeTowerContrastiveOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = True, return_dict: Optional[bool] = None, return_loss: Optional[bool] = None, ) -> Union[BridgeTowerContrastiveOutput, Tuple[torch.FloatTensor]]: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Returns: Examples: ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning >>> import requests >>> from PIL import Image >>> import torch >>> image_urls = [ ... "https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg", ... "http://images.cocodataset.org/val2017/000000039769.jpg", ... 
] >>> texts = ["two dogs in a car", "two cats sleeping on a couch"] >>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> inputs = processor(images, texts, padding=True, return_tensors="pt") >>> loss = model(**inputs, return_loss=True).loss >>> inputs = processor(images, texts[::-1], padding=True, return_tensors="pt") >>> loss_swapped = model(**inputs, return_loss=True).loss >>> print("Loss", round(loss.item(), 4)) Loss 0.0019 >>> print("Loss with swapped images", round(loss_swapped.item(), 4)) Loss with swapped images 2.126 ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) pooler_output = outputs.pooler_output if return_dict else outputs[2] hidden_states_txt, hidden_states_img, hidden_states_cross_modal = ( outputs.hidden_states if return_dict else outputs[3] ) text_embeds = hidden_states_txt[-1] image_embeds = hidden_states_img[-1] image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds) image_token_type_embeddings = self.bridgetower.token_type_embeddings( torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device) ).expand_as(image_embeds_with_ln) image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings # normalized features text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2) image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to( device=text_embeds.device ) cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to( device=text_embeds.device ) logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2) logit_scale = self.logit_scale.exp().to(device=text_embeds.device) logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale itc_loss = None if return_loss: labels = torch.arange(len(logits), device=logits.device) text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels) text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels) image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels) itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0 if not return_dict: output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:] return ((itc_loss,) + output) if itc_loss is not None else output return BridgeTowerContrastiveOutput( loss=itc_loss, logits=logits, text_embeds=text_embeds, image_embeds=image_embeds, cross_embeds=cross_embeds, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
85,137
91,386
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/modeling_bridgetower.py
null
8,383
class BridgeTowerImageProcessor(BaseImageProcessor): r""" Constructs a BridgeTower image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{'shortest_edge': 288}`): Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under `int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method. size_divisor (`int`, *optional*, defaults to 32): The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*): Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. Can be overridden by the `crop_size` parameter in the `preprocess` method. If unset defaults to `size`, do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by the `do_pad` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 288} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size self.size_divisor = size_divisor self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_pad = do_pad self.do_center_crop = do_center_crop self.crop_size = crop_size # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then resized to the max size while preserving the aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Controls the size of the output image. Should be of the form `{"shortest_edge": int}`. size_divisor (`int`, *optional*, defaults to 32): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size: raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}") shorter = size["shortest_edge"] longer = int(1333 / 800 * shorter) output_size = get_resize_output_image_size( image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. 
Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image in the form `{"height": h, "width": w}`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ output_size = size["shortest_edge"] return center_crop( image, size=(output_size, output_size), data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, do_center_crop: Optional[bool] = None, crop_size: Dict[str, int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. size_divisor (`int`, *optional*, defaults to `self.size_divisor`): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also created and returned. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after center crop. 
If one edge of the image is smaller than `crop_size`, it will be padded with zeros and then cropped return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop # For backwards compatibility. Initial version of this processor was cropping to the "size" argument, which # it should default to if crop_size is undefined. crop_size = ( crop_size if crop_size is not None else (self.crop_size if self.crop_size is not None else self.size) ) size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) if not is_batched(images): images = [images] if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # Here, crop_size is used only if it is set, else size will be used. validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size_divisor, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if do_resize: images = [ self.resize( image=image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format, ) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] if do_pad: encoded_outputs = self.pad( images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format ) else: encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs
class_definition
4,382
26,290
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/bridgetower/image_processing_bridgetower.py
null
8,384
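The image-processor docstring above states the shortest-edge sizing rule in prose. Below is a small standalone sketch of that arithmetic; it is not the library helper (`get_resize_output_image_size`), whose exact rounding may differ:

```python
def sketch_output_size(height, width, shortest_edge=288, size_divisor=32):
    """Rough re-statement of the rule: shorter side -> shortest_edge, longer side
    capped at int(1333 / 800 * shortest_edge), both snapped to size_divisor."""
    longest_cap = int(1333 / 800 * shortest_edge)
    scale = shortest_edge / min(height, width)
    new_h, new_w = height * scale, width * scale
    if max(new_h, new_w) > longest_cap:
        rescale = longest_cap / max(new_h, new_w)
        new_h, new_w = new_h * rescale, new_w * rescale
    # Snap down to multiples of size_divisor (the library helper may round instead of floor).
    return int(new_h) // size_divisor * size_divisor, int(new_w) // size_divisor * size_divisor


print(sketch_output_size(480, 640))  # e.g. a 480x640 COCO image -> (288, 384)
```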
class MT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the MT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # MT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
class_definition
4,113
5,209
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,385
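The forward in the record above is plain RMS normalization with fp32 accumulation; a compact functional sketch of the same computation for reference:

```python
import torch


def rms_layer_norm_sketch(hidden_states, weight, eps=1e-6):
    # Accumulate the mean of squares in fp32, scale by the reciprocal RMS,
    # then cast back so the weight multiplication happens in the weight dtype.
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    normed = hidden_states * torch.rsqrt(variance + eps)
    if weight.dtype in (torch.float16, torch.bfloat16):
        normed = normed.to(weight.dtype)
    return weight * normed


x = torch.randn(2, 5, 8, dtype=torch.bfloat16)
print(rms_layer_norm_sketch(x, torch.ones(8, dtype=torch.bfloat16)).shape)
```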
class MT5DenseActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class_definition
5,290
6,151
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,386
class MT5DenseGatedActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. # See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class_definition
6,237
7,526
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,387
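A compact sketch of the gated feed-forward path in the record above, with made-up dimensions and assuming a tanh-approximate GELU activation as used by the released mT5 configs:

```python
import torch
import torch.nn as nn

d_model, d_ff = 16, 64  # illustrative sizes
wi_0 = nn.Linear(d_model, d_ff, bias=False)  # gate branch
wi_1 = nn.Linear(d_model, d_ff, bias=False)  # linear branch
wo = nn.Linear(d_ff, d_model, bias=False)

x = torch.randn(2, 3, d_model)
hidden = nn.functional.gelu(wi_0(x), approximate="tanh") * wi_1(x)  # element-wise gating
print(wo(hidden).shape)  # back to (2, 3, d_model)
```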
class MT5LayerFF(nn.Module): def __init__(self, config: MT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = MT5DenseGatedActDense(config) else: self.DenseReluDense = MT5DenseActDense(config) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class_definition
7,601
8,272
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,388
class MT5Attention(nn.Module): def __init__( self, config: MT5Config, has_relative_attention_bias=False, layer_idx: Optional[int] = None, ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" # Input is (batch_size, seq_length, dim) # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) batch_size, seq_length = hidden_states.shape[:2] # if key_value_states are provided this layer is used as a cross-attention layer for the decoder is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_value is not None: is_updated = past_key_value.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_value.cross_attention_cache else: curr_past_key_value = past_key_value.self_attention_cache current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_value is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.key_cache[self.layer_idx] value_states = curr_past_key_value.value_cache[self.layer_idx] else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_value is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_value.is_updated[self.layer_idx] = True # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device, cache_position=cache_position ) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, : key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, 
-1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, past_key_value, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class_definition
8,349
19,583
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,389
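The `_relative_position_bucket` docstring above is easier to follow on concrete numbers; since the method is a `@staticmethod`, it can be probed directly. A small sketch, assuming a recent `transformers` install:

```python
import torch
from transformers.models.mt5.modeling_mt5 import MT5Attention

# Relative positions memory_position - query_position, from -8 to 8.
rel = torch.arange(-8, 9)
buckets = MT5Attention._relative_position_bucket(
    rel, bidirectional=True, num_buckets=32, max_distance=128
)
# Small offsets get exact buckets, large ones share log-spaced buckets,
# and positive offsets are shifted by num_buckets // 2.
print(torch.stack([rel, buckets]))
```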
class MT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.SelfAttention = MT5Attention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs
class_definition
19,669
21,022
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,390
class MT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None): super().__init__() self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs
class_definition
21,109
22,525
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,391
class MT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( MT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) ) if self.is_decoder: self.layer.append(MT5LayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(MT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states, past_key_value = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, past_key_value = cross_attention_outputs[:2] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (past_key_value,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, past_key_value, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class_definition
22,598
26,739
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,392
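The block in the record above repeats the same fp16 overflow guard after self-attention, cross-attention, and the feed-forward. In isolation, the pattern looks like this (a sketch with a deliberately overflowing tensor):

```python
import torch

hidden_states = torch.tensor([1.0, float("inf"), -65000.0], dtype=torch.float16)

if hidden_states.dtype == torch.float16:
    # If anything overflowed to inf, clamp the whole tensor just below the fp16 maximum.
    fp16_max = torch.finfo(torch.float16).max
    clamp_value = fp16_max - 1000 if torch.isinf(hidden_states).any() else fp16_max
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

print(hidden_states)  # the inf entry is now a large finite value
```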
class MT5ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: MT5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states
class_definition
31,366
32,082
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,393
class MT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MT5Config load_tf_weights = load_tf_weights_in_mt5 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _supports_quantized_cache = False # enc-dec models don't support yet _supports_static_cache = True _supports_cache_class = True _no_split_modules = ["MT5Block"] _keep_in_fp32_modules = ["wo"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, MT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, (MT5Model, MT5ForConditionalGeneration, MT5EncoderModel, MT5ForQuestionAnswering), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, MT5ForTokenClassification): if hasattr(module, "classifier"): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, MT5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.dense, "bias") and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, MT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5Attention): # Mesh TensorFlow 
attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. " "See MT5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
class_definition
32,174
38,609
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,394
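A minimal sketch of what the `_shift_right` helper in the `MT5PreTrainedModel` record above does to a batch of labels. The token ids below are arbitrary placeholders, and `pad_token_id`/`decoder_start_token_id` are set to 0 only because that is the usual mT5 convention; real values come from the model's `MT5Config`.

```python
import torch

# Placeholder special-token ids; in practice they are read from MT5Config.
pad_token_id = 0
decoder_start_token_id = 0

# Two made-up label sequences; -100 marks positions the loss should ignore.
labels = torch.tensor(
    [
        [259, 1461, 5433, 1, -100],
        [259, 877, 1, -100, -100],
    ]
)

# Same steps as the non-fx branch of MT5PreTrainedModel._shift_right:
shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()            # move every token one slot to the right
shifted[..., 0] = decoder_start_token_id               # prepend the decoder start token
shifted.masked_fill_(shifted == -100, pad_token_id)    # -100 is a loss mask, not a real token id

print(shifted)
# tensor([[   0,  259, 1461, 5433,    1],
#         [   0,  259,  877,    1,    0]])
```

This is the transformation `MT5ForConditionalGeneration` applies internally when it builds `decoder_input_ids` from `labels`.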
class MT5Stack(MT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [MT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)] ) self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`MT5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif 
input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") # initialize past_key_values return_legacy_cache = False return_self_attention_cache = False if self.is_decoder and (use_cache or past_key_values is not None): if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache): return_self_attention_cache = True past_key_values = EncoderDecoderCache(past_key_values, DynamicCache()) elif not isinstance(past_key_values, EncoderDecoderCache): return_legacy_cache = True logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) elif past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if past_key_values is not None else None, output_attentions, ) elif attention_mask is not None: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min else: causal_mask = None # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long ) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: 
encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if causal_mask is not None: causal_mask = causal_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if cross_attn_layer_head_mask is not None: cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, return_dict, cache_position, ) else: layer_outputs = layer_module( hidden_states, attention_mask=causal_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, next_decoder_cache = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the 
next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_self_attention_cache: next_cache = past_key_values.self_attention_cache if return_legacy_cache: next_cache = past_key_values.to_legacy_cache() if not return_dict: return tuple( v for v in [ hidden_states, next_cache, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`int`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
class_definition
38,682
59,749
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,395
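The least obvious part of the `MT5Stack` record is how `_prepare_4d_causal_attention_mask_with_cache_position` turns a 2D padding mask plus `cache_position` into the 4D additive mask the attention layers consume. The sketch below replays those tensor operations on invented shapes (2 cached tokens, 3 new tokens, one left-padded sequence) so the result can be inspected directly; it is an illustration of the construction, not a call into the private method.

```python
import torch

# Invented toy setup: 3 new query tokens attending over 5 key/value positions
# (2 already in the cache + the 3 new ones); the second sequence has one padded position.
batch_size, sequence_length, target_length = 2, 3, 5
dtype, device = torch.float32, "cpu"
min_dtype = torch.finfo(dtype).min

attention_mask = torch.tensor([[1, 1, 1, 1, 1],
                               [0, 1, 1, 1, 1]])    # 2D padding mask over all 5 positions
cache_position = torch.arange(2, 5)                  # absolute positions of the 3 new tokens

# Same construction as in the record: causal part first ...
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# ... then fold the 2D padding mask into the slice it covers.
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
    padding_mask == 0, min_dtype
)

# 1 = "may attend", shown for the left-padded sequence: causal structure plus the masked first column.
print((causal_mask == 0).long()[1, 0])
# tensor([[0, 1, 1, 0, 0],
#         [0, 1, 1, 1, 0],
#         [0, 1, 1, 1, 1]])
```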
class MT5Model(MT5PreTrainedModel): r""" Examples: ```python >>> from transformers import MT5Model, AutoTokenizer >>> model = MT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="pt") >>> labels = tokenizer(text_target=summary, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5Model.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5Model.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0':" " 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5Model.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder def get_decoder(self): return self.decoder # Copied from transformers.models.t5.modeling_t5.T5Model._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5Model.forward with google-t5/->google/, T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, MT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5Model.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for MT5Model. >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )
class_definition
69,896
80,097
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,396
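Because `MT5Model.forward` accepts a precomputed `encoder_outputs`, the encoder only has to run once when several decoder inputs are scored against the same source text. The snippet below is an illustrative usage sketch, not part of the record: it assumes the `google/mt5-small` checkpoint can be downloaded and reuses the `_shift_right` preprocessing already shown in the class docstring.

```python
import torch
from transformers import AutoTokenizer, MT5Model

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = MT5Model.from_pretrained("google/mt5-small").eval()

article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
inputs = tokenizer(article, return_tensors="pt")

with torch.no_grad():
    # Encode the source once ...
    encoder_outputs = model.get_encoder()(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]
    )

    # ... and reuse it for several candidate target prefixes without re-encoding.
    for target in ["Weiter Verhandlung in Syrien.", "Verhandlungen gehen weiter."]:
        decoder_input_ids = model._shift_right(
            tokenizer(text_target=target, return_tensors="pt").input_ids
        )
        outputs = model(
            encoder_outputs=encoder_outputs,
            attention_mask=inputs["attention_mask"],
            decoder_input_ids=decoder_input_ids,
        )
        print(target, tuple(outputs.last_hidden_state.shape))
```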
class MT5ForConditionalGeneration(MT5PreTrainedModel, GenerationMixin): r""" Examples: ```python >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = "mt5" config_class = MT5Config _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you" " should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also" " provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance" " {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings def get_output_embeddings(self): return self.lm_head # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.forward with google-t5/->google/, T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) 
self.lm_head = self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past
class_definition
80,200
94,780
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,397
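When `labels` are passed, the record above shows that `MT5ForConditionalGeneration` shifts them into `decoder_input_ids`, optionally rescales the decoder states by `d_model**-0.5` when embeddings are tied, and then computes a plain token-level cross-entropy with `ignore_index=-100`. The sketch below checks that reading by recomputing the loss from the returned logits; it assumes the `google/mt5-small` checkpoint is available.

```python
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoTokenizer, MT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small").eval()

inputs = tokenizer(
    "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt"
)
labels = tokenizer(text_target="Weiter Verhandlung in Syrien.", return_tensors="pt").input_ids

with torch.no_grad():
    out = model(**inputs, labels=labels)

# The reported loss is ordinary cross-entropy over the vocabulary; positions equal to -100
# would be ignored (this toy batch has no padded label positions, so nothing is masked here).
manual_loss = CrossEntropyLoss(ignore_index=-100)(
    out.logits.view(-1, out.logits.size(-1)), labels.view(-1)
)
print(out.loss.item(), manual_loss.item())  # the two values match
```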
class MT5EncoderModel(MT5PreTrainedModel): r""" Examples: ```python >>> from transformers import MT5EncoderModel, AutoTokenizer >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config _tied_weights_keys = ["encoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(MT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with google-t5/->google/, T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, MT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
class_definition
94,944
100,418
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,398
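`MT5EncoderModel` returns per-token hidden states only, so anything sentence-level has to be pooled by the caller. Masked mean pooling over `last_hidden_state` is one common choice, not something the class prescribes; the sketch below assumes the `google/mt5-small` checkpoint and is only meant to show the pooling arithmetic.

```python
import torch
from transformers import AutoTokenizer, MT5EncoderModel

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = MT5EncoderModel.from_pretrained("google/mt5-small").eval()

sentences = [
    "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.",
    "Weiter Verhandlung in Syrien.",
]
batch = tokenizer(sentences, padding=True, return_tensors="pt")

with torch.no_grad():
    hidden = model(**batch).last_hidden_state           # (batch, seq_len, d_model)

# Masked mean pooling: average token states, ignoring padded positions.
mask = batch["attention_mask"].unsqueeze(-1).float()    # (batch, seq_len, 1)
embeddings = (hidden * mask).sum(dim=1) / mask.sum(dim=1)
print(tuple(embeddings.shape))                           # (2, d_model), i.e. (2, 512) for mt5-small
```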
class MT5ForSequenceClassification(MT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.transformer = MT5Model(config) self.classification_head = MT5ClassificationHead(config) # Initialize weights and apply final processing self.post_init() self.model_parallel = False @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) # Copied from models.bart.modeling_bart.BartModel.forward different to other models, T5 automatically creates # decoder_input_ids from input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
) decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, )
class_definition
100,615
106,909
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mt5/modeling_mt5.py
null
8,399
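The pooling step inside `MT5ForSequenceClassification.forward` is easy to misread: the classification head does not see a mean over the sequence, it sees the decoder state at the position of the last `<eos>` token in `input_ids`, and every example in the batch must contain the same number of `<eos>` tokens. The toy tensors below are invented purely to make that indexing visible.

```python
import torch

# Invented stand-ins for the tensors inside MT5ForSequenceClassification.forward.
eos_token_id = 1          # 1 is the conventional </s> id for T5-style vocabularies
input_ids = torch.tensor([[10, 11, 12, 1, 0],
                          [20, 21, 1, 0, 0]])          # each row has exactly one <eos>
sequence_output = torch.arange(2 * 5 * 4, dtype=torch.float).view(2, 5, 4)  # (batch, seq, hidden)

# Same gather as in forward(): keep the state at the last <eos> position of each row.
eos_mask = input_ids.eq(eos_token_id)
batch_size, _, hidden_size = sequence_output.shape
sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]

print(sentence_representation)
# tensor([[12., 13., 14., 15.],    <- sequence_output[0, 3]
#         [28., 29., 30., 31.]])   <- sequence_output[1, 2]
```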