Columns (name, dtype, value range / cardinality):
  text          stringlengths   31 - 243k
  type          stringclasses   1 value
  start         int64           36 - 275k
  end           int64           286 - 280k
  depth         int64           0 - 1
  filepath      stringlengths   85 - 188
  parent_class  stringclasses   3 values
  class_index   int64           0 - 10.8k
class ModuleTransfer: src: nn.Module dest: nn.Module verbose: int = 1 src_skip: List = field(default_factory=list) dest_skip: List = field(default_factory=list) raise_if_mismatch: bool = True def __call__(self, x: Tensor): """ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the hood we track all the operations in both modules. """ dest_traced = Tracker(self.dest)(x).parametrized src_traced = Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced) and self.raise_if_mismatch: raise Exception( f"Numbers of operations are different. Source module has {len(src_traced)} operations while" f" destination module has {len(dest_traced)}." ) for dest_m, src_m in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"Transferred from={src_m} to={dest_m}")
class_definition
2,187
3,429
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py
null
8,600
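A minimal, self-contained sketch of the weight-transfer idea that ModuleTransfer above automates: pair up the parameter-carrying leaf modules of two architecturally equivalent models and copy their state dicts across. The toy Sequential models below are illustrative assumptions, not part of the conversion script.

import torch
from torch import nn

# Two architecturally identical toy models stand in for the source and destination RegNets.
src = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())
dest = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())

def parametrized_leaves(module: nn.Module):
    # leaf modules that actually carry parameters or buffers
    return [m for m in module.modules() if len(list(m.children())) == 0 and len(m.state_dict()) > 0]

for d, s in zip(parametrized_leaves(dest), parametrized_leaves(src)):
    d.load_state_dict(s.state_dict())

src.eval(), dest.eval()
x = torch.randn(1, 3, 32, 32)
assert torch.allclose(src(x), dest(x))  # identical outputs after the transfer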
class FakeRegNetVisslWrapper(nn.Module): """ Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. """ def __init__(self, model: nn.Module): super().__init__() feature_blocks: List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block"), f"Unexpected layer name {k}" block_index = len(feature_blocks) + 1 feature_blocks.append((f"res{block_index}", v)) self._feature_blocks = nn.ModuleDict(feature_blocks) def forward(self, x: Tensor): return get_trunk_forward_outputs( x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class_definition
3,432
4,312
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py
null
8,601
class NameToFromModelFuncMap(dict): """ A Dictionary with some additional logic to return a function that creates the correct original model. """ def convert_name_to_timm(self, x: str) -> str: x_split = x.split("-") return x_split[0] + x_split[1] + "_" + "".join(x_split[2:]) def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]: # default to timm! if x not in self: x = self.convert_name_to_timm(x) val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None)) else: val = super().__getitem__(x) return val
class_definition
4,315
4,961
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py
null
8,602
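A quick, runnable check of the name-conversion rule implemented by convert_name_to_timm above; the checkpoint names used here are illustrative examples, not an exhaustive list.

def convert_name_to_timm(x: str) -> str:
    x_split = x.split("-")
    return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

assert convert_name_to_timm("regnet-y-040") == "regnety_040"
assert convert_name_to_timm("regnet-x-002") == "regnetx_002"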
class NameToOurModelFuncMap(dict): """ A Dictionary with some additional logic to return the correct hugging face RegNet class reference. """ def __getitem__(self, x: str) -> Callable[[], nn.Module]: if "seer" in x and "in1k" not in x: val = RegNetModel else: val = RegNetForImageClassification return val
class_definition
4,964
5,334
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py
null
8,603
class TFRegNetConvLayer(keras.layers.Layer): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs, ): super().__init__(**kwargs) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb self.padding = keras.layers.ZeroPadding2D(padding=kernel_size // 2) self.convolution = keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", ) self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else tf.identity self.in_channels = in_channels self.out_channels = out_channels def call(self, hidden_state): hidden_state = self.convolution(self.padding(hidden_state)) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels])
class_definition
1,653
3,492
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,604
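The padding scheme in TFRegNetConvLayer (explicit symmetric ZeroPadding2D of kernel_size // 2 followed by a VALID convolution) mirrors PyTorch's Conv2d(..., padding=kernel_size // 2). A small shape-only sketch, with arbitrary example sizes:

import tensorflow as tf

x = tf.random.normal((1, 32, 32, 3))  # NHWC input
kernel_size, stride = 3, 2
pad = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
conv = tf.keras.layers.Conv2D(filters=8, kernel_size=kernel_size, strides=stride, padding="VALID")
# (1, 16, 16, 8), the same spatial size torch.nn.Conv2d(3, 8, 3, stride=2, padding=1) gives on 32x32
print(conv(pad(x)).shape)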
class TFRegNetEmbeddings(keras.layers.Layer): """ RegNet Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: RegNetConfig, **kwargs): super().__init__(**kwargs) self.num_channels = config.num_channels self.embedder = TFRegNetConvLayer( in_channels=config.num_channels, out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", ) def call(self, pixel_values): num_channels = shape_list(pixel_values)[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) hidden_state = self.embedder(pixel_values) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None)
class_definition
3,495
4,988
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,605
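TFRegNetEmbeddings accepts NCHW pixel values and converts them to NHWC before the stem, and TFRegNetMainLayer transposes the outputs back. A minimal sketch of that data-format round trip (shapes are arbitrary examples):

import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))      # NCHW, as the model inputs are documented
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))   # (2, 224, 224, 3) for keras Conv2D
nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))           # back to (2, 3, 224, 224) for the returned hidden states
print(nhwc.shape, nchw.shape)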
class TFRegNetShortCut(keras.layers.Layer): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs): super().__init__(**kwargs) self.convolution = keras.layers.Conv2D( filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.in_channels = in_channels self.out_channels = out_channels def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: return self.normalization(self.convolution(inputs), training=training) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels])
class_definition
4,991
6,329
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,606
class TFRegNetSELayer(keras.layers.Layer): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). """ def __init__(self, in_channels: int, reduced_channels: int, **kwargs): super().__init__(**kwargs) self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") self.attention = [ keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"), keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"), ] self.in_channels = in_channels self.reduced_channels = reduced_channels def call(self, hidden_state): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] pooled = self.pooler(hidden_state) for layer_module in self.attention: pooled = layer_module(pooled) hidden_state = hidden_state * pooled return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build((None, None, None, None)) if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention[0].name): self.attention[0].build([None, None, None, self.in_channels]) with tf.name_scope(self.attention[1].name): self.attention[1].build([None, None, None, self.reduced_channels])
class_definition
6,332
7,975
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,607
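A stand-alone sketch of the squeeze-and-excitation computation that TFRegNetSELayer wraps: global average pooling, a 1x1 reducing convolution with ReLU, a 1x1 expanding convolution with sigmoid, then channel-wise re-weighting. The channel counts here are arbitrary assumptions.

import tensorflow as tf

x = tf.random.normal((1, 8, 8, 64))                     # NHWC feature map
pooled = tf.reduce_mean(x, axis=(1, 2), keepdims=True)  # squeeze: (1, 1, 1, 64)
reduce = tf.keras.layers.Conv2D(16, 1, activation="relu")
expand = tf.keras.layers.Conv2D(64, 1, activation="sigmoid")
gate = expand(reduce(pooled))                           # excitation: per-channel weights in (0, 1)
y = x * gate                                            # re-weighted feature map, same shape as x
print(y.shape)  # (1, 8, 8, 64)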
class TFRegNetXLayer(keras.layers.Layer): """ RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) # `self.layers` instead of `self.layer` because that is a reserved argument. self.layers = [ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), TFRegNetConvLayer( out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" ), TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.2"), ] self.activation = ACT2FN[config.hidden_act] def call(self, hidden_state): residual = hidden_state for layer_module in self.layers: hidden_state = layer_module(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
class_definition
7,978
9,974
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,608
class TFRegNetYLayer(keras.layers.Layer): """ RegNet's Y layer: an X layer with Squeeze and Excitation. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.layers = [ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), TFRegNetConvLayer( out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" ), TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"), TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.3"), ] self.activation = ACT2FN[config.hidden_act] def call(self, hidden_state): residual = hidden_state for layer_module in self.layers: hidden_state = layer_module(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
class_definition
9,977
11,944
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,609
class TFRegNetStage(keras.layers.Layer): """ A RegNet stage composed by stacked layers. """ def __init__( self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs ): super().__init__(**kwargs) layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer self.layers = [ # downsampling is done in the first layer with stride of 2 layer(config, in_channels, out_channels, stride=stride, name="layers.0"), *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)], ] def call(self, hidden_state): for layer_module in self.layers: hidden_state = layer_module(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
class_definition
11,947
13,047
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,610
class TFRegNetEncoder(keras.layers.Layer): def __init__(self, config: RegNetConfig, **kwargs): super().__init__(**kwargs) self.stages = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])): self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")) def call( self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> TFBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) def build(self, input_shape=None): if self.built: return self.built = True for stage in self.stages: with tf.name_scope(stage.name): stage.build(None)
class_definition
13,050
14,904
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,611
class TFRegNetMainLayer(keras.layers.Layer): config_class = RegNetConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embedder = TFRegNetEmbeddings(config, name="embedder") self.encoder = TFRegNetEncoder(config, name="encoder") self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> TFBaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Change to NCHW output format have uniformity in the modules pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2)) last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build((None, None, None, None))
class_definition
14,927
17,534
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,612
class TFRegNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RegNetConfig base_model_prefix = "regnet" main_input_name = "pixel_values" @property def input_signature(self): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
class_definition
17,537
17,995
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,613
class TFRegNetModel(TFRegNetPreTrainedModel): def __init__(self, config: RegNetConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.regnet = TFRegNetMainLayer(config, name="regnet") @unpack_inputs @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regnet", None) is not None: with tf.name_scope(self.regnet.name): self.regnet.build(None)
class_definition
19,394
21,231
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,614
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: RegNetConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.regnet = TFRegNetMainLayer(config, name="regnet") # classification head self.classifier = [ keras.layers.Flatten(), keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) pooled_output = outputs.pooler_output if return_dict else outputs[1] flattened_output = self.classifier[0](pooled_output) logits = self.classifier[1](flattened_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regnet", None) is not None: with tf.name_scope(self.regnet.name): self.regnet.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier[1].name): self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]])
class_definition
21,433
24,299
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_tf_regnet.py
null
8,615
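A hedged usage sketch for TFRegNetForImageClassification: it builds a deliberately tiny model from a locally constructed RegNetConfig (these sizes and the label count are arbitrary assumptions, not a released checkpoint) and runs a random NCHW batch through it. Real weights would instead be loaded with from_pretrained.

import tensorflow as tf
from transformers import RegNetConfig, TFRegNetForImageClassification

config = RegNetConfig(hidden_sizes=[32, 64], depths=[1, 1], num_labels=10)  # toy configuration
model = TFRegNetForImageClassification(config)
pixel_values = tf.random.normal((1, config.num_channels, 64, 64))  # NCHW, as the model expects
outputs = model(pixel_values)
print(outputs.logits.shape)  # (1, 10)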
class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) name2module: Dict[str, nn.Module] = field(default_factory=OrderedDict) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: self.traced.append(m) self.name2module[name] = m def __call__(self, x: Tensor): for name, m in self.module.named_modules(): self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name))) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0}
class_definition
1,550
2,561
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
null
8,616
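A self-contained sketch of the forward-hook tracing that Tracker performs: register a hook on every module, run one forward pass, keep only the leaf operations, and then filter for those that carry learnable state. The toy model is an illustrative assumption.

import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU(), nn.Flatten(), nn.Linear(4 * 30 * 30, 2))
traced = []

def hook(module, inputs, outputs):
    if len(list(module.children())) == 0:  # record leaf operations only
        traced.append(module)

handles = [m.register_forward_hook(hook) for m in model.modules()]
model(torch.randn(1, 3, 32, 32))
for h in handles:
    h.remove()

parametrized = [m for m in traced if len(m.state_dict()) > 0]
print([type(m).__name__ for m in parametrized])  # ['Conv2d', 'BatchNorm2d', 'Linear']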
class FakeRegNetVisslWrapper(nn.Module): """ Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. """ def __init__(self, model: nn.Module): super().__init__() feature_blocks: List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block"), f"Unexpected layer name {k}" block_index = len(feature_blocks) + 1 feature_blocks.append((f"res{block_index}", v)) self._feature_blocks = nn.ModuleDict(feature_blocks) def forward(self, x: Tensor): return get_trunk_forward_outputs( x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class_definition
2,564
3,444
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
null
8,617
class FakeRegNetParams(RegNetParams): """ Used to instantiate a RegNet model from classy vision with the same depth as the 10B one but with super small parameters, so we can trace it in memory. """ def get_expanded_params(self): return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)]
class_definition
3,447
3,789
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
null
8,618
class Identity(nn.Module): """Identity function.""" @nn.compact def __call__(self, x, **kwargs): return x
class_definition
4,173
4,299
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,619
class FlaxRegNetConvLayer(nn.Module): out_channels: int kernel_size: int = 3 stride: int = 1 groups: int = 1 activation: Optional[str] = "relu" dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(self.kernel_size, self.kernel_size), strides=self.stride, padding=self.kernel_size // 2, feature_group_count=self.groups, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity() def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) hidden_state = self.activation_func(hidden_state) return hidden_state
class_definition
4,302
5,469
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,620
class FlaxRegNetEmbeddings(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embedder = FlaxRegNetConvLayer( self.config.embedding_size, kernel_size=3, stride=2, activation=self.config.hidden_act, dtype=self.dtype, ) def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: num_channels = pixel_values.shape[-1] if num_channels != self.config.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) hidden_state = self.embedder(pixel_values, deterministic=deterministic) return hidden_state
class_definition
5,472
6,279
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,621
class FlaxRegNetShortCut(nn.Module): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ out_channels: int stride: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(1, 1), strides=self.stride, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(x) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) return hidden_state
class_definition
6,383
7,335
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,622
class FlaxRegNetSELayerCollection(nn.Module): in_channels: int reduced_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_1 = nn.Conv( self.reduced_channels, kernel_size=(1, 1), kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, name="0", ) # 0 is the name used in corresponding pytorch implementation self.conv_2 = nn.Conv( self.in_channels, kernel_size=(1, 1), kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, name="2", ) # 2 is the name used in corresponding pytorch implementation def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: hidden_state = self.conv_1(hidden_state) hidden_state = nn.relu(hidden_state) hidden_state = self.conv_2(hidden_state) attention = nn.sigmoid(hidden_state) return attention
class_definition
7,338
8,431
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,623
class FlaxRegNetSELayer(nn.Module): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). """ in_channels: int reduced_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.pooler = partial(nn.avg_pool, padding=((0, 0), (0, 0))) self.attention = FlaxRegNetSELayerCollection(self.in_channels, self.reduced_channels, dtype=self.dtype) def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: pooled = self.pooler( hidden_state, window_shape=(hidden_state.shape[1], hidden_state.shape[2]), strides=(hidden_state.shape[1], hidden_state.shape[2]), ) attention = self.attention(pooled) hidden_state = hidden_state * attention return hidden_state
class_definition
8,434
9,283
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,624
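In the Flax port the SE "squeeze" is a global average pool built from flax.linen.avg_pool with a window spanning the full spatial extent. A minimal NHWC sketch with arbitrary shapes:

import jax
from flax import linen as nn

x = jax.random.normal(jax.random.PRNGKey(0), (1, 8, 8, 64))
pooled = nn.avg_pool(x, window_shape=(8, 8), strides=(8, 8), padding=((0, 0), (0, 0)))
print(pooled.shape)  # (1, 1, 1, 64)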
class FlaxRegNetXLayerCollection(nn.Module): config: RegNetConfig out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): groups = max(1, self.out_channels // self.config.groups_width) self.layer = [ FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=self.config.hidden_act, dtype=self.dtype, name="0", ), FlaxRegNetConvLayer( self.out_channels, stride=self.stride, groups=groups, activation=self.config.hidden_act, dtype=self.dtype, name="1", ), FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="2", ), ] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state
class_definition
9,286
10,484
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,625
class FlaxRegNetXLayer(nn.Module): """ RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxRegNetShortCut( self.out_channels, stride=self.stride, dtype=self.dtype, ) if should_apply_shortcut else Identity() ) self.layer = FlaxRegNetXLayerCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, dtype=self.dtype, ) self.activation_func = ACT2FN[self.config.hidden_act] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual, deterministic=deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state
class_definition
10,487
11,796
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,626
class FlaxRegNetYLayerCollection(nn.Module): config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): groups = max(1, self.out_channels // self.config.groups_width) self.layer = [ FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=self.config.hidden_act, dtype=self.dtype, name="0", ), FlaxRegNetConvLayer( self.out_channels, stride=self.stride, groups=groups, activation=self.config.hidden_act, dtype=self.dtype, name="1", ), FlaxRegNetSELayer( self.out_channels, reduced_channels=int(round(self.in_channels / 4)), dtype=self.dtype, name="2", ), FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="3", ), ] def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state) return hidden_state
class_definition
11,799
13,169
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,627
class FlaxRegNetYLayer(nn.Module): """ RegNet's Y layer: an X layer with Squeeze and Excitation. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxRegNetShortCut( self.out_channels, stride=self.stride, dtype=self.dtype, ) if should_apply_shortcut else Identity() ) self.layer = FlaxRegNetYLayerCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, dtype=self.dtype, ) self.activation_func = ACT2FN[self.config.hidden_act] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual, deterministic=deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state
class_definition
13,172
14,433
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,628
class FlaxRegNetStageLayersCollection(nn.Module): """ A RegNet stage composed by stacked layers. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): layer = FlaxRegNetXLayer if self.config.layer_type == "x" else FlaxRegNetYLayer layers = [ # downsampling is done in the first layer with stride of 2 layer( self.config, self.in_channels, self.out_channels, stride=self.stride, dtype=self.dtype, name="0", ) ] for i in range(self.depth - 1): layers.append( layer( self.config, self.out_channels, self.out_channels, dtype=self.dtype, name=str(i + 1), ) ) self.layers = layers def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = x for layer in self.layers: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state
class_definition
14,436
15,715
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,629
class FlaxRegNetStage(nn.Module): """ A RegNet stage composed by stacked layers. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = FlaxRegNetStageLayersCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, depth=self.depth, dtype=self.dtype, ) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: return self.layers(x, deterministic=deterministic)
class_definition
15,816
16,488
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,630
class FlaxRegNetStageCollection(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): in_out_channels = zip(self.config.hidden_sizes, self.config.hidden_sizes[1:]) stages = [ FlaxRegNetStage( self.config, self.config.embedding_size, self.config.hidden_sizes[0], stride=2 if self.config.downsample_in_first_stage else 1, depth=self.config.depths[0], dtype=self.dtype, name="0", ) ] for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, self.config.depths[1:])): stages.append( FlaxRegNetStage(self.config, in_channels, out_channels, depth=depth, dtype=self.dtype, name=str(i + 1)) ) self.stages = stages def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) hidden_state = stage_module(hidden_state, deterministic=deterministic) return hidden_state, hidden_states
class_definition
16,599
18,030
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,631
class FlaxRegNetEncoder(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.stages = FlaxRegNetStageCollection(self.config, dtype=self.dtype) def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_state, hidden_states = self.stages( hidden_state, output_hidden_states=output_hidden_states, deterministic=deterministic ) if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return FlaxBaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, )
class_definition
18,133
19,090
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,632
class FlaxRegNetPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RegNetConfig base_model_prefix = "regnet" main_input_name = "pixel_values" module_class: nn.Module = None def __init__( self, config: RegNetConfig, input_shape=(1, 224, 224, 3), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors pixel_values = jnp.zeros(input_shape, dtype=self.dtype) rngs = {"params": rng} random_params = self.module.init(rngs, pixel_values, return_dict=False) if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) def __call__( self, pixel_values, params: dict = None, train: bool = False, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) # Handle any PRNG if needed rngs = {} return self.module.apply( { "params": params["params"] if params is not None else self.params["params"], "batch_stats": params["batch_stats"] if params is not None else self.params["batch_stats"], }, jnp.array(pixel_values, dtype=jnp.float32), not train, output_hidden_states, return_dict, rngs=rngs, mutable=["batch_stats"] if train else False, # Returing tuple with batch_stats only when train is True )
class_definition
19,231
22,011
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,633
class FlaxRegNetModule(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embedder = FlaxRegNetEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRegNetEncoder(self.config, dtype=self.dtype) # Adaptive average pooling used in resnet self.pooler = partial( nn.avg_pool, padding=((0, 0), (0, 0)), ) def __call__( self, pixel_values, deterministic: bool = True, output_hidden_states: bool = False, return_dict: bool = True, ) -> FlaxBaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values, deterministic=deterministic) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler( last_hidden_state, window_shape=(last_hidden_state.shape[1], last_hidden_state.shape[2]), strides=(last_hidden_state.shape[1], last_hidden_state.shape[2]), ).transpose(0, 3, 1, 2) last_hidden_state = last_hidden_state.transpose(0, 3, 1, 2) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return FlaxBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
22,113
24,034
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,634
class FlaxRegNetModel(FlaxRegNetPreTrainedModel): module_class = FlaxRegNetModule
class_definition
24,177
24,262
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,635
class FlaxRegNetClassifierCollection(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, name="1") def __call__(self, x: jnp.ndarray) -> jnp.ndarray: return self.classifier(x)
class_definition
25,253
25,560
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,636
class FlaxRegNetForImageClassificationModule(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.regnet = FlaxRegNetModule(config=self.config, dtype=self.dtype) if self.config.num_labels > 0: self.classifier = FlaxRegNetClassifierCollection(self.config, dtype=self.dtype) else: self.classifier = Identity() def __call__( self, pixel_values=None, deterministic: bool = True, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values, deterministic=deterministic, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output[:, :, 0, 0]) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxImageClassifierOutputWithNoAttention(logits=logits, hidden_states=outputs.hidden_states)
class_definition
25,714
26,923
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,637
class FlaxRegNetForImageClassification(FlaxRegNetPreTrainedModel): module_class = FlaxRegNetForImageClassificationModule
class_definition
27,125
27,249
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_flax_regnet.py
null
8,638
class Pop2PianoLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the Pop2Piano style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # Pop2Piano uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
class_definition
8,738
9,852
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,639
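A numeric sketch of the scale-only (RMS-style) normalization implemented by Pop2PianoLayerNorm: the mean of squares is used in place of a true variance, there is no mean subtraction and no bias, and the reduction runs in float32. Shapes and epsilon below are illustrative.

import torch

hidden_states = torch.randn(2, 5, 16)
weight = torch.ones(16)
eps = 1e-6

variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)  # mean of squares, no mean subtraction
out = weight * (hidden_states * torch.rsqrt(variance + eps))              # scale only, no bias
print(out.shape)  # torch.Size([2, 5, 16])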
class Pop2PianoDenseActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class_definition
10,085
10,958
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,640
class Pop2PianoDenseGatedActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. # See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class_definition
11,050
12,351
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,641
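A small sketch of the gated feed-forward computation in Pop2PianoDenseGatedActDense: the activated branch act(wi_0(x)) gates the linear branch wi_1(x) before the output projection wo. The GELU used here stands in for whatever ACT2FN[config.dense_act_fn] resolves to, and the sizes are arbitrary assumptions.

import torch
from torch import nn

d_model, d_ff = 16, 64
wi_0 = nn.Linear(d_model, d_ff, bias=False)
wi_1 = nn.Linear(d_model, d_ff, bias=False)
wo = nn.Linear(d_ff, d_model, bias=False)

x = torch.randn(2, 7, d_model)
hidden = nn.functional.gelu(wi_0(x)) * wi_1(x)  # gate the non-linear branch with the linear one
out = wo(hidden)
print(out.shape)  # torch.Size([2, 7, 16])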
class Pop2PianoLayerFF(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() if config.is_gated_act: self.DenseReluDense = Pop2PianoDenseGatedActDense(config) else: self.DenseReluDense = Pop2PianoDenseActDense(config) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class_definition
12,432
13,133
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,642
class Pop2PianoAttention(nn.Module): def __init__( self, config: Pop2PianoConfig, has_relative_attention_bias=False, layer_idx: Optional[int] = None, ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" # Input is (batch_size, seq_length, dim) # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) batch_size, seq_length = hidden_states.shape[:2] # if key_value_states are provided this layer is used as a cross-attention layer for the decoder is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_value is not None: is_updated = past_key_value.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_value.cross_attention_cache else: curr_past_key_value = past_key_value.self_attention_cache current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_value is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.key_cache[self.layer_idx] value_states = curr_past_key_value.value_cache[self.layer_idx] else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_value is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_value.is_updated[self.layer_idx] = True # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device, cache_position=cache_position ) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, : key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, 
-1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, past_key_value, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class_definition
13,230
24,476
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,643
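The attention layer above follows the T5 recipe: unscaled dot-product scores plus an additive relative-position bias, softmax computed in float32, and a final output projection. Below is a minimal shape walk-through of that score computation with toy sizes; the tensor names are illustrative and this is not the module's API.

import torch

batch, heads, q_len, kv_len, d_head = 2, 4, 5, 5, 8
q = torch.randn(batch, heads, q_len, d_head)
k = torch.randn(batch, heads, kv_len, d_head)
v = torch.randn(batch, heads, kv_len, d_head)
# stands in for the learned, bucketed relative-position bias of the real layer
position_bias = torch.zeros(1, heads, q_len, kv_len)

scores = torch.matmul(q, k.transpose(3, 2)) + position_bias   # no 1/sqrt(d) scaling, T5-style
attn_weights = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_output = torch.matmul(attn_weights, v)                   # (batch, heads, q_len, d_head)
attn_output = attn_output.transpose(1, 2).reshape(batch, q_len, heads * d_head)
print(attn_output.shape)   # torch.Size([2, 5, 32])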
class Pop2PianoLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.SelfAttention = Pop2PianoAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs
class_definition
24,582
25,953
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,644
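This wrapper (and the cross-attention wrapper in the next record) applies a pre-norm residual pattern: normalize, run the attention sub-layer, apply dropout, and add the result back to the unnormalized input. A toy sketch of the pattern, assuming a plain LayerNorm and an identity stand-in for the attention call:

import torch
import torch.nn as nn

d_model = 16
layer_norm = nn.LayerNorm(d_model)    # the real model uses an RMS-style Pop2PianoLayerNorm
attention = nn.Identity()             # stand-in for the attention sub-layer
dropout = nn.Dropout(0.1)

hidden_states = torch.randn(2, 7, d_model)
normed_hidden_states = layer_norm(hidden_states)
hidden_states = hidden_states + dropout(attention(normed_hidden_states))   # residual connection
print(hidden_states.shape)   # torch.Size([2, 7, 16])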
class Pop2PianoLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None): super().__init__() self.EncDecAttention = Pop2PianoAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs
class_definition
26,060
27,494
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,645
class Pop2PianoBlock(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( Pop2PianoLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) ) if self.is_decoder: self.layer.append(Pop2PianoLayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(Pop2PianoLayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states, past_key_value = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, past_key_value = cross_attention_outputs[:2] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (past_key_value,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, past_key_value, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class_definition
27,587
31,782
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,646
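The repeated clamping in the block keeps fp16 training stable: whenever an activation overflows to inf, every value is pulled just inside the representable fp16 range. The trick in isolation:

import torch

hidden_states = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
clamp_value = torch.where(
    torch.isinf(hidden_states).any(),
    torch.finfo(hidden_states.dtype).max - 1000,   # back off from the max if any inf is present
    torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)   # the infinities become large finite values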
class Pop2PianoPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Pop2PianoConfig base_model_prefix = "transformer" is_parallelizable = False supports_gradient_checkpointing = True _supports_cache_class = True _supports_static_cache = False _no_split_modules = ["Pop2PianoBlock"] _keep_in_fp32_modules = ["wo"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, Pop2PianoLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, Pop2PianoConcatEmbeddingToMel): module.embedding.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoForConditionalGeneration): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoDenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoDenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. 
In Pop2Piano it is usually set to the pad_token_id." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
class_definition
31,785
36,854
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,647
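`_shift_right` builds decoder inputs from labels: prepend `decoder_start_token_id`, drop the last label, and replace any `-100` loss-masking markers with `pad_token_id`. A minimal reproduction with made-up token ids (per the error message above, Pop2Piano usually sets the start id to the pad id):

import torch

decoder_start_token_id, pad_token_id = 0, 0
labels = torch.tensor([[23, 57, -100, -100]])

shifted_input_ids = labels.new_zeros(labels.shape)
shifted_input_ids[..., 1:] = labels[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
print(shifted_input_ids)   # tensor([[ 0, 23, 57,  0]])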
class Pop2PianoStack(Pop2PianoPreTrainedModel): # Copied from transformers.models.t5.modeling_t5.T5Stack.__init__ with T5->Pop2Piano,t5->pop2piano def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [ Pop2PianoBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers) ] ) self.final_layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") # initialize past_key_values return_legacy_cache = False return_self_attention_cache = False if self.is_decoder and (use_cache or past_key_values is not None): if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache): return_self_attention_cache = True past_key_values = EncoderDecoderCache(past_key_values, DynamicCache()) elif not isinstance(past_key_values, EncoderDecoderCache): return_legacy_cache = True logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. 
" "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) elif past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if past_key_values is not None else None, output_attentions, ) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, cache_position, ) else: layer_outputs = layer_module( hidden_states, attention_mask=causal_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) # 
layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, next_decoder_cache = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_self_attention_cache: next_cache = past_key_values.self_attention_cache if return_legacy_cache: next_cache = past_key_values.to_legacy_cache() if not return_dict: return tuple( v for v in [ hidden_states, next_cache, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
class_definition
36,857
54,214
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,648
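The causal-mask helper fills disallowed key positions with the dtype's most negative value (so they vanish after softmax) and uses `cache_position` so that previously cached tokens stay visible to new queries. A compact sketch of the same construction with toy sizes, not the stack's API:

import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length, target_length = 3, 5                    # 3 new queries, 5 key positions in total
cache_position = torch.arange(2, 2 + sequence_length)    # 2 tokens already sit in the cache

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :]              # (1, 1, query_length, key_length)
print((causal_mask == 0).squeeze())                      # True where attention is allowed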
class Pop2PianoConcatEmbeddingToMel(nn.Module): """Embedding Matrix for `composer` tokens.""" def __init__(self, config): super().__init__() self.embedding = nn.Embedding(num_embeddings=config.composer_vocab_size, embedding_dim=config.d_model) def forward(self, feature, index_value, embedding_offset): index_shifted = index_value - embedding_offset composer_embedding = self.embedding(index_shifted).unsqueeze(1) inputs_embeds = torch.cat([composer_embedding, feature], dim=1) return inputs_embeds
class_definition
54,217
54,777
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,649
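The conditioner looks up one composer embedding (after subtracting the smallest composer token id) and prepends it as an extra frame in front of the mel features. A toy reproduction; sizes and token ids are made up for illustration:

import torch
import torch.nn as nn

d_model, composer_vocab_size = 512, 21
embedding = nn.Embedding(composer_vocab_size, d_model)

feature = torch.randn(2, 100, d_model)      # (batch, mel_frames, d_model)
index_value = torch.tensor([2052, 2055])    # hypothetical composer token ids
embedding_offset = 2052                      # smallest composer token id

composer_embedding = embedding(index_value - embedding_offset).unsqueeze(1)
inputs_embeds = torch.cat([composer_embedding, feature], dim=1)
print(inputs_embeds.shape)   # torch.Size([2, 101, 512])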
class Pop2PianoForConditionalGeneration(Pop2PianoPreTrainedModel, GenerationMixin): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: Pop2PianoConfig): super().__init__(config) self.config = config self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) self.mel_conditioner = Pop2PianoConcatEmbeddingToMel(config) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = Pop2PianoStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = Pop2PianoStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_mel_conditioner_outputs( self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: torch.FloatTensor = None, ): """ This method is used to concatenate mel conditioner tokens at the front of the input_features in order to control the type of MIDI token generated by the model. Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): input features extracted from the feature extractor. composer (`str`): composer token which determines the type of MIDI tokens to be generated. generation_config (`~generation.GenerationConfig`): The generation is used to get the composer-feature_token pair. attention_mask (``, *optional*): For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. """ composer_to_feature_token = generation_config.composer_to_feature_token if composer not in composer_to_feature_token.keys(): raise ValueError( f"Please choose a composer from {list(composer_to_feature_token.keys())}. 
Composer received - {composer}" ) composer_value = composer_to_feature_token[composer] composer_value = torch.tensor(composer_value, device=self.device) composer_value = composer_value.repeat(input_features.shape[0]) embedding_offset = min(composer_to_feature_token.values()) input_features = self.mel_conditioner( feature=input_features, index_value=composer_value, embedding_offset=embedding_offset, ) if attention_mask is not None: input_features[~attention_mask[:, 0].bool()] = 0.0 # since self.mel_conditioner adds a new array at the front of inputs_embeds we need to do the same for attention_mask to keep the shapes same attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1) return input_features, attention_mask return input_features, None @add_start_docstrings_to_model_forward(POP2PIANO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, input_features: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None and input_features is not None: raise ValueError("Both `inputs_embeds` and `input_features` received! 
Please provide only one of them") elif input_features is not None and inputs_embeds is None: inputs_embeds = input_features # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_features, attention_mask=None, composer="composer1", generation_config=None, **kwargs, ): """ Generates token ids for midi outputs. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): This is the featurized version of audio generated by `Pop2PianoFeatureExtractor`. attention_mask: For batched generation `input_features` are padded to have the same shape across all examples. 
`attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. composer (`str`, *optional*, defaults to `"composer1"`): This value is passed to `Pop2PianoConcatEmbeddingToMel` to generate different embeddings for each `"composer"`. Please make sure that the composet value is present in `composer_to_feature_token` in `generation_config`. For an example please see https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json . generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. kwargs: Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. Since Pop2Piano is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config.update(**kwargs) # check for composer_to_feature_token if not hasattr(generation_config, "composer_to_feature_token"): raise ValueError( "`composer_to_feature_token` was not found! Please refer to " "https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json" "and parse a dict like that." ) if len(generation_config.composer_to_feature_token) != self.config.composer_vocab_size: raise ValueError( "config.composer_vocab_size must be same as the number of keys in " f"generation_config.composer_to_feature_token! " f"Found {self.config.composer_vocab_size} vs {len(generation_config.composer_to_feature_token)}." ) # to control the variation of generated MIDI tokens we concatenate mel-conditioner tokens(which depends on composer_token) # at the front of input_features. 
input_features, attention_mask = self.get_mel_conditioner_outputs( input_features=input_features, attention_mask=attention_mask, composer=composer, generation_config=generation_config, ) return super().generate( inputs=None, inputs_embeds=input_features, attention_mask=attention_mask, generation_config=generation_config, **kwargs, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past
class_definition
55,764
72,008
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/modeling_pop2piano.py
null
8,650
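For context, a minimal end-to-end sketch of how this model is typically driven, assuming the public `sweetcocoa/pop2piano` checkpoint, the companion `Pop2PianoProcessor`, and a local `song.wav` (file name and composer choice are illustrative):

import librosa
from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor

audio, sr = librosa.load("song.wav", sr=44100)   # hypothetical input file

model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")

inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt")
generated_ids = model.generate(input_features=inputs["input_features"], composer="composer1")

# the processor converts the generated token ids back into pretty_midi objects
midi = processor.batch_decode(token_ids=generated_ids, feature_extractor_output=inputs)[
    "pretty_midi_objects"
][0]
midi.write("output.mid")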
class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Pop2Piano feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts rhythm and preprocesses the audio before it is passed to the model. First the audio is passed to `RhythmExtractor2013` algorithm which extracts the beat_times, beat positions and estimates their confidence as well as tempo in bpm, then beat_times is interpolated and to get beatsteps. Later we calculate extrapolated_beatsteps from it to be used in tokenizer. On the other hand audio is resampled to self.sampling_rate and preprocessed and then log mel spectogram is computed from that to be used in our transformer model. Args: sampling_rate (`int`, *optional*, defaults to 22050): Target Sampling rate of audio signal. It's the sampling rate that we forward to the model. padding_value (`int`, *optional*, defaults to 0): Padding value used to pad the audio. Should correspond to silences. window_size (`int`, *optional*, defaults to 4096): Length of the window in samples to which the Fourier transform is applied. hop_length (`int`, *optional*, defaults to 1024): Step size between each window of the waveform, in samples. min_frequency (`float`, *optional*, defaults to 10.0): Lowest frequency that will be used in the log-mel spectrogram. feature_size (`int`, *optional*, defaults to 512): The feature dimension of the extracted features. num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. """ model_input_names = ["input_features", "beatsteps", "extrapolated_beatstep"] def __init__( self, sampling_rate: int = 22050, padding_value: int = 0, window_size: int = 4096, hop_length: int = 1024, min_frequency: float = 10.0, feature_size: int = 512, num_bars: int = 2, **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, ) self.sampling_rate = sampling_rate self.padding_value = padding_value self.window_size = window_size self.hop_length = hop_length self.min_frequency = min_frequency self.feature_size = feature_size self.num_bars = num_bars self.mel_filters = mel_filter_bank( num_frequency_bins=(self.window_size // 2) + 1, num_mel_filters=self.feature_size, min_frequency=self.min_frequency, max_frequency=float(self.sampling_rate // 2), sampling_rate=self.sampling_rate, norm=None, mel_scale="htk", ) def mel_spectrogram(self, sequence: np.ndarray): """ Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed. """ mel_specs = [] for seq in sequence: window = np.hanning(self.window_size + 1)[:-1] mel_specs.append( spectrogram( waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, ) ) mel_specs = np.array(mel_specs) return mel_specs def extract_rhythm(self, audio: np.ndarray): """ This algorithm(`RhythmExtractor2013`) extracts the beat positions and estimates their confidence as well as tempo in bpm for an audio signal. For more information please visit https://essentia.upf.edu/reference/std_RhythmExtractor2013.html . Args: audio(`numpy.ndarray`): raw audio waveform which is passed to the Rhythm Extractor. 
""" requires_backends(self, ["essentia"]) essentia_tracker = essentia.standard.RhythmExtractor2013(method="multifeature") bpm, beat_times, confidence, estimates, essentia_beat_intervals = essentia_tracker(audio) return bpm, beat_times, confidence, estimates, essentia_beat_intervals def interpolate_beat_times( self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray ): """ This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is then used to convert raw audio to log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as an parameter to control the interpolation. n_extend (`int`): used as an parameter to control the interpolation. """ requires_backends(self, ["scipy"]) beat_times_function = scipy.interpolate.interp1d( np.arange(beat_times.size), beat_times, bounds_error=False, fill_value="extrapolate", ) ext_beats = beat_times_function( np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend) ) return ext_beats def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): """ Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0]. """ if audio is not None and len(audio.shape) != 1: raise ValueError( f"Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}." ) if beatstep[0] > 0.0: beatstep = beatstep - beatstep[0] num_steps = self.num_bars * 4 num_target_steps = len(beatstep) extrapolated_beatstep = self.interpolate_beat_times( beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1 ) sample_indices = [] max_feature_length = 0 for i in range(0, num_target_steps, num_steps): start_idx = i end_idx = min(i + num_steps, num_target_steps) start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate) end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate) sample_indices.append((start_sample, end_sample)) max_feature_length = max(max_feature_length, end_sample - start_sample) padded_batch = [] for start_sample, end_sample in sample_indices: feature = audio[start_sample:end_sample] padded_feature = np.pad( feature, ((0, max_feature_length - feature.shape[0]),), "constant", constant_values=0, ) padded_batch.append(padded_feature) padded_batch = np.asarray(padded_batch) return padded_batch, extrapolated_beatstep def _pad(self, features: np.ndarray, add_zero_line=True): features_shapes = [each_feature.shape for each_feature in features] attention_masks, padded_features = [], [] for i, each_feature in enumerate(features): # To pad "input_features". if len(each_feature.shape) == 3: features_pad_value = max([*zip(*features_shapes)][1]) - features_shapes[i][1] attention_mask = np.ones(features_shapes[i][:2], dtype=np.int64) feature_padding = ((0, 0), (0, features_pad_value), (0, 0)) attention_mask_padding = (feature_padding[0], feature_padding[1]) # To pad "beatsteps" and "extrapolated_beatstep". 
else: each_feature = each_feature.reshape(1, -1) features_pad_value = max([*zip(*features_shapes)][0]) - features_shapes[i][0] attention_mask = np.ones(features_shapes[i], dtype=np.int64).reshape(1, -1) feature_padding = attention_mask_padding = ((0, 0), (0, features_pad_value)) each_padded_feature = np.pad(each_feature, feature_padding, "constant", constant_values=self.padding_value) attention_mask = np.pad( attention_mask, attention_mask_padding, "constant", constant_values=self.padding_value ) if add_zero_line: # if it is batched then we seperate each examples using zero array zero_array_len = max([*zip(*features_shapes)][1]) # we concatenate the zero array line here each_padded_feature = np.concatenate( [each_padded_feature, np.zeros([1, zero_array_len, self.feature_size])], axis=0 ) attention_mask = np.concatenate( [attention_mask, np.zeros([1, zero_array_len], dtype=attention_mask.dtype)], axis=0 ) padded_features.append(each_padded_feature) attention_masks.append(attention_mask) padded_features = np.concatenate(padded_features, axis=0).astype(np.float32) attention_masks = np.concatenate(attention_masks, axis=0).astype(np.int64) return padded_features, attention_masks def pad( self, inputs: BatchFeature, is_batched: bool, return_attention_mask: bool, return_tensors: Optional[Union[str, TensorType]] = None, ): """ Pads the inputs to same length and returns attention_mask. Args: inputs (`BatchFeature`): Processed audio features. is_batched (`bool`): Whether inputs are batched or not. return_attention_mask (`bool`): Whether to return attention mask or not. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. Return: `BatchFeature` with attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep added to it: - **attention_mask** numpy.ndarray of shape `(batch_size, max_input_features_seq_length)` -- Example : 1, 1, 1, 0, 0 (audio 1, also here it is padded to max length of 5 thats why there are 2 zeros at the end indicating they are padded) 0, 0, 0, 0, 0 (zero pad to seperate audio 1 and 2) 1, 1, 1, 1, 1 (audio 2) 0, 0, 0, 0, 0 (zero pad to seperate audio 2 and 3) 1, 1, 1, 1, 1 (audio 3) - **attention_mask_beatsteps** numpy.ndarray of shape `(batch_size, max_beatsteps_seq_length)` - **attention_mask_extrapolated_beatstep** numpy.ndarray of shape `(batch_size, max_extrapolated_beatstep_seq_length)` """ processed_features_dict = {} for feature_name, feature_value in inputs.items(): if feature_name == "input_features": padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=True) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict["attention_mask"] = attention_mask else: padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=False) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict[f"attention_mask_{feature_name}"] = attention_mask # If we are processing only one example, we should remove the zero array line since we don't need it to # seperate examples from each other. if not is_batched and not return_attention_mask: processed_features_dict["input_features"] = processed_features_dict["input_features"][:-1, ...] 
outputs = BatchFeature(processed_features_dict, tensor_type=return_tensors) return outputs def __call__( self, audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Union[int, List[int]], steps_per_beat: int = 2, resample: Optional[bool] = True, return_attention_mask: Optional[bool] = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model. Args: audio (`np.ndarray`, `List`): The audio or batch of audio to be processed. Each audio can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. sampling_rate (`int`): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. steps_per_beat (`int`, *optional*, defaults to 2): This is used in interpolating `beat_times`. resample (`bool`, *optional*, defaults to `True`): Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True during inference. return_attention_mask (`bool` *optional*, defaults to `False`): Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as output or not. Automatically set to True for batched inputs. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. """ requires_backends(self, ["librosa"]) is_batched = bool(isinstance(audio, (list, tuple)) and isinstance(audio[0], (np.ndarray, tuple, list))) if is_batched: # This enables the user to process files of different sampling_rate at same time if not isinstance(sampling_rate, list): raise ValueError( "Please give sampling_rate of each audio separately when you are passing multiple raw_audios at the same time. " f"Received {sampling_rate}, expected [audio_1_sr, ..., audio_n_sr]." ) return_attention_mask = True if return_attention_mask is None else return_attention_mask else: audio = [audio] sampling_rate = [sampling_rate] return_attention_mask = False if return_attention_mask is None else return_attention_mask batch_input_features, batch_beatsteps, batch_ext_beatstep = [], [], [] for single_raw_audio, single_sampling_rate in zip(audio, sampling_rate): bpm, beat_times, confidence, estimates, essentia_beat_intervals = self.extract_rhythm( audio=single_raw_audio ) beatsteps = self.interpolate_beat_times(beat_times=beat_times, steps_per_beat=steps_per_beat, n_extend=1) if self.sampling_rate != single_sampling_rate and self.sampling_rate is not None: if resample: # Change sampling_rate to self.sampling_rate single_raw_audio = librosa.core.resample( single_raw_audio, orig_sr=single_sampling_rate, target_sr=self.sampling_rate, res_type="kaiser_best", ) else: warnings.warn( f"The sampling_rate of the provided audio is different from the target sampling_rate " f"of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. " f"In these cases it is recommended to use `resample=True` in the `__call__` method to " f"get the optimal behaviour." 
) single_sampling_rate = self.sampling_rate start_sample = int(beatsteps[0] * single_sampling_rate) end_sample = int(beatsteps[-1] * single_sampling_rate) input_features, extrapolated_beatstep = self.preprocess_mel( single_raw_audio[start_sample:end_sample], beatsteps - beatsteps[0] ) mel_specs = self.mel_spectrogram(input_features.astype(np.float32)) # apply np.log to get log mel-spectrograms log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-6, a_max=None)) input_features = np.transpose(log_mel_specs, (0, -1, -2)) batch_input_features.append(input_features) batch_beatsteps.append(beatsteps) batch_ext_beatstep.append(extrapolated_beatstep) output = BatchFeature( { "input_features": batch_input_features, "beatsteps": batch_beatsteps, "extrapolated_beatstep": batch_ext_beatstep, } ) output = self.pad( output, is_batched=is_batched, return_attention_mask=return_attention_mask, return_tensors=return_tensors, ) return output
class_definition
1,289
19,837
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/feature_extraction_pop2piano.py
null
8,651
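A short sketch of running the feature extractor on its own (it additionally needs the `essentia`, `librosa` and `scipy` backends; the file name is illustrative):

import librosa
from transformers import Pop2PianoFeatureExtractor

audio, sr = librosa.load("song.wav", sr=None)   # hypothetical input file, native sampling rate

feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")
features = feature_extractor(audio, sampling_rate=sr, return_tensors="pt")

# log-mel segments plus the beat information the tokenizer needs later
print(features["input_features"].shape)
print(features["beatsteps"].shape, features["extrapolated_beatstep"].shape)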
class Pop2PianoTokenizer(PreTrainedTokenizer): """ Constructs a Pop2Piano tokenizer. This tokenizer does not require training. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab (`str`): Path to the vocab file which contains the vocabulary. default_velocity (`int`, *optional*, defaults to 77): Determines the default velocity to be used while creating midi Notes. num_bars (`int`, *optional*, defaults to 2): Determines cutoff_time_idx in for each token. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"-1"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 1): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 0): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 2): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. """ model_input_names = ["token_ids", "attention_mask"] vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab, default_velocity=77, num_bars=2, unk_token="-1", eos_token="1", pad_token="0", bos_token="2", **kwargs, ): unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token self.default_velocity = default_velocity self.num_bars = num_bars # Load the vocab with open(vocab, "rb") as file: self.encoder = json.load(file) # create mappings for encoder self.decoder = {v: k for k, v in self.encoder.items()} super().__init__( unk_token=unk_token, eos_token=eos_token, pad_token=pad_token, bos_token=bos_token, **kwargs, ) @property def vocab_size(self): """Returns the vocabulary size of the tokenizer.""" return len(self.encoder) def get_vocab(self): """Returns the vocabulary of the tokenizer.""" return dict(self.encoder, **self.added_tokens_encoder) def _convert_id_to_token(self, token_id: int) -> list: """ Decodes the token ids generated by the transformer into notes. Args: token_id (`int`): This denotes the ids generated by the transformers to be converted to Midi tokens. Returns: `List`: A list consists of token_type (`str`) and value (`int`). """ token_type_value = self.decoder.get(token_id, f"{self.unk_token}_TOKEN_TIME") token_type_value = token_type_value.split("_") token_type, value = "_".join(token_type_value[1:]), int(token_type_value[0]) return [token_type, value] def _convert_token_to_id(self, token, token_type="TOKEN_TIME") -> int: """ Encodes the Midi tokens to transformer generated token ids. Args: token (`int`): This denotes the token value. token_type (`str`): This denotes the type of the token. There are four types of midi tokens such as "TOKEN_TIME", "TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL". Returns: `int`: returns the id of the token. 
""" return self.encoder.get(f"{token}_{token_type}", int(self.unk_token)) def relative_batch_tokens_ids_to_notes( self, tokens: np.ndarray, beat_offset_idx: int, bars_per_batch: int, cutoff_time_idx: int, ): """ Converts relative tokens to notes which are then used to generate pretty midi object. Args: tokens (`numpy.ndarray`): Tokens to be converted to notes. beat_offset_idx (`int`): Denotes beat offset index for each note in generated Midi. bars_per_batch (`int`): A parameter to control the Midi output generation. cutoff_time_idx (`int`): Denotes the cutoff time index for each note in generated Midi. """ notes = None for index in range(len(tokens)): _tokens = tokens[index] _start_idx = beat_offset_idx + index * bars_per_batch * 4 _cutoff_time_idx = cutoff_time_idx + _start_idx _notes = self.relative_tokens_ids_to_notes( _tokens, start_idx=_start_idx, cutoff_time_idx=_cutoff_time_idx, ) if len(_notes) == 0: pass elif notes is None: notes = _notes else: notes = np.concatenate((notes, _notes), axis=0) if notes is None: return [] return notes def relative_batch_tokens_ids_to_midi( self, tokens: np.ndarray, beatstep: np.ndarray, beat_offset_idx: int = 0, bars_per_batch: int = 2, cutoff_time_idx: int = 12, ): """ Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens to notes then uses `notes_to_midi` method to convert them to Midi. Args: tokens (`numpy.ndarray`): Denotes tokens which alongside beatstep will be converted to Midi. beatstep (`np.ndarray`): We get beatstep from feature extractor which is also used to get Midi. beat_offset_idx (`int`, *optional*, defaults to 0): Denotes beat offset index for each note in generated Midi. bars_per_batch (`int`, *optional*, defaults to 2): A parameter to control the Midi output generation. cutoff_time_idx (`int`, *optional*, defaults to 12): Denotes the cutoff time index for each note in generated Midi. """ beat_offset_idx = 0 if beat_offset_idx is None else beat_offset_idx notes = self.relative_batch_tokens_ids_to_notes( tokens=tokens, beat_offset_idx=beat_offset_idx, bars_per_batch=bars_per_batch, cutoff_time_idx=cutoff_time_idx, ) midi = self.notes_to_midi(notes, beatstep, offset_sec=beatstep[beat_offset_idx]) return midi # Taken from the original code # Please see https://github.com/sweetcocoa/pop2piano/blob/fac11e8dcfc73487513f4588e8d0c22a22f2fdc5/midi_tokenizer.py#L257 def relative_tokens_ids_to_notes(self, tokens: np.ndarray, start_idx: float, cutoff_time_idx: float = None): """ Converts relative tokens to notes which will then be used to create Pretty Midi objects. Args: tokens (`numpy.ndarray`): Relative Tokens which will be converted to notes. start_idx (`float`): A parameter which denotes the starting index. cutoff_time_idx (`float`, *optional*): A parameter used while converting tokens to notes. 
""" words = [self._convert_id_to_token(token) for token in tokens] current_idx = start_idx current_velocity = 0 note_onsets_ready = [None for i in range(sum([k.endswith("NOTE") for k in self.encoder.keys()]) + 1)] notes = [] for token_type, number in words: if token_type == "TOKEN_SPECIAL": if number == 1: break elif token_type == "TOKEN_TIME": current_idx = token_time_to_note( number=number, cutoff_time_idx=cutoff_time_idx, current_idx=current_idx ) elif token_type == "TOKEN_VELOCITY": current_velocity = number elif token_type == "TOKEN_NOTE": notes = token_note_to_note( number=number, current_velocity=current_velocity, default_velocity=self.default_velocity, note_onsets_ready=note_onsets_ready, current_idx=current_idx, notes=notes, ) else: raise ValueError("Token type not understood!") for pitch, note_onset in enumerate(note_onsets_ready): # force offset if no offset for each pitch if note_onset is not None: if cutoff_time_idx is None: cutoff = note_onset + 1 else: cutoff = max(cutoff_time_idx, note_onset + 1) offset_idx = max(current_idx, cutoff) notes.append([note_onset, offset_idx, pitch, self.default_velocity]) if len(notes) == 0: return [] else: notes = np.array(notes) note_order = notes[:, 0] * 128 + notes[:, 1] notes = notes[note_order.argsort()] return notes def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int = 0.0): """ Converts notes to Midi. Args: notes (`numpy.ndarray`): This is used to create Pretty Midi objects. beatstep (`numpy.ndarray`): This is the extrapolated beatstep that we get from feature extractor. offset_sec (`int`, *optional*, defaults to 0.0): This represents the offset seconds which is used while creating each Pretty Midi Note. """ requires_backends(self, ["pretty_midi"]) new_pm = pretty_midi.PrettyMIDI(resolution=384, initial_tempo=120.0) new_inst = pretty_midi.Instrument(program=0) new_notes = [] for onset_idx, offset_idx, pitch, velocity in notes: new_note = pretty_midi.Note( velocity=velocity, pitch=pitch, start=beatstep[onset_idx] - offset_sec, end=beatstep[offset_idx] - offset_sec, ) new_notes.append(new_note) new_inst.notes = new_notes new_pm.instruments.append(new_inst) new_pm.remove_invalid_notes() return new_pm def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: """ Saves the tokenizer's vocabulary dictionary to the provided save_directory. Args: save_directory (`str`): A path to the directory where to saved. It will be created if it doesn't exist. filename_prefix (`Optional[str]`, *optional*): A prefix to add to the names of the files saved by the tokenizer. """ if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return # Save the encoder. out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"] ) with open(out_vocab_file, "w") as file: file.write(json.dumps(self.encoder)) return (out_vocab_file,) def encode_plus( self, notes: Union[np.ndarray, List[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy] = None, max_length: Optional[int] = None, **kwargs, ) -> BatchEncoding: r""" This is the `encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It only works on a single batch, to process multiple batches please use `batch_encode_plus` or `__call__` method. 
Args: notes (`numpy.ndarray` of shape `[sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. """ requires_backends(self, ["pretty_midi"]) # check if notes is a pretty_midi object or not, if yes then extract the attributes and put them into a numpy # array. if isinstance(notes[0], pretty_midi.Note): notes = np.array( [[each_note.start, each_note.end, each_note.pitch, each_note.velocity] for each_note in notes] ).reshape(-1, 4) # to round up all the values to the closest int values. notes = np.round(notes).astype(np.int32) max_time_idx = notes[:, :2].max() times = [[] for i in range((max_time_idx + 1))] for onset, offset, pitch, velocity in notes: times[onset].append([pitch, velocity]) times[offset].append([pitch, 0]) tokens = [] current_velocity = 0 for i, time in enumerate(times): if len(time) == 0: continue tokens.append(self._convert_token_to_id(i, "TOKEN_TIME")) for pitch, velocity in time: velocity = int(velocity > 0) if current_velocity != velocity: current_velocity = velocity tokens.append(self._convert_token_to_id(velocity, "TOKEN_VELOCITY")) tokens.append(self._convert_token_to_id(pitch, "TOKEN_NOTE")) total_len = len(tokens) # truncation if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: tokens, _, _ = self.truncate_sequences( ids=tokens, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, **kwargs, ) return BatchEncoding({"token_ids": tokens}) def batch_encode_plus( self, notes: Union[np.ndarray, List[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy] = None, max_length: Optional[int] = None, **kwargs, ) -> BatchEncoding: r""" This is the `batch_encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It works on multiple batches by calling `encode_plus` multiple times in a loop. Args: notes (`numpy.ndarray` of shape `[batch_size, sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. 
""" encoded_batch_token_ids = [] for i in range(len(notes)): encoded_batch_token_ids.append( self.encode_plus( notes[i], truncation_strategy=truncation_strategy, max_length=max_length, **kwargs, )["token_ids"] ) return BatchEncoding({"token_ids": encoded_batch_token_ids}) def __call__( self, notes: Union[ np.ndarray, List[pretty_midi.Note], List[List[pretty_midi.Note]], ], padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, **kwargs, ) -> BatchEncoding: r""" This is the `__call__` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. Args: notes (`numpy.ndarray` of shape `[batch_size, max_sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
pad_to_multiple_of (`int`, *optional*): If set, will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. Returns: `BatchEncoding` containing the token_ids. """ # check if it is batched or not # it is batched if it's a list containing a list of `pretty_midi.Notes` where the outer list contains all the # batches and the inner list contains all Notes for a single batch. Otherwise if np.ndarray is passed it will be # considered batched if it has shape of `[batch_size, sequence_length, 4]` or ndim=3. is_batched = notes.ndim == 3 if isinstance(notes, np.ndarray) else isinstance(notes[0], list) # get the truncation and padding strategy padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) if is_batched: # If the user has not explicitly mentioned `return_attention_mask` as False, we change it to True return_attention_mask = True if return_attention_mask is None else return_attention_mask token_ids = self.batch_encode_plus( notes=notes, truncation_strategy=truncation_strategy, max_length=max_length, **kwargs, ) else: token_ids = self.encode_plus( notes=notes, truncation_strategy=truncation_strategy, max_length=max_length, **kwargs, ) # since we already have truncated sequences we are just left to do padding token_ids = self.pad( token_ids, padding=padding_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_tensors=return_tensors, verbose=verbose, ) return token_ids def batch_decode( self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool = True, ): r""" This is the `batch_decode` method for `Pop2PianoTokenizer`. It converts the token_ids generated by the transformer to midi_notes and returns them. Args: token_ids (`Union[np.ndarray, torch.Tensor, tf.Tensor]`): Output token_ids of `Pop2PianoConditionalGeneration` model. feature_extractor_output (`BatchFeature`): Denotes the output of `Pop2PianoFeatureExtractor.__call__`. It must contain `"beatstep"` and `"extrapolated_beatstep"`. Also `"attention_mask_beatsteps"` and `"attention_mask_extrapolated_beatstep"` should be present if they were returned by the feature extractor. return_midi (`bool`, *optional*, defaults to `True`): Whether to return midi object or not. Returns: If `return_midi` is True: - `BatchEncoding` containing both `notes` and `pretty_midi.pretty_midi.PrettyMIDI` objects. If `return_midi` is False: - `BatchEncoding` containing `notes`.
""" # check if they have attention_masks(attention_mask, attention_mask_beatsteps, attention_mask_extrapolated_beatstep) or not attention_masks_present = bool( hasattr(feature_extractor_output, "attention_mask") and hasattr(feature_extractor_output, "attention_mask_beatsteps") and hasattr(feature_extractor_output, "attention_mask_extrapolated_beatstep") ) # if we are processing batched inputs then we must need attention_masks if not attention_masks_present and feature_extractor_output["beatsteps"].shape[0] > 1: raise ValueError( "attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep must be present " "for batched inputs! But one of them were not present." ) # check for length mismatch between inputs_embeds, beatsteps and extrapolated_beatstep if attention_masks_present: # since we know about the number of examples in token_ids from attention_mask if ( sum(feature_extractor_output["attention_mask"][:, 0] == 0) != feature_extractor_output["beatsteps"].shape[0] or feature_extractor_output["beatsteps"].shape[0] != feature_extractor_output["extrapolated_beatstep"].shape[0] ): raise ValueError( "Length mistamtch between token_ids, beatsteps and extrapolated_beatstep! Found " f"token_ids length - {token_ids.shape[0]}, beatsteps shape - {feature_extractor_output['beatsteps'].shape[0]} " f"and extrapolated_beatsteps shape - {feature_extractor_output['extrapolated_beatstep'].shape[0]}" ) if feature_extractor_output["attention_mask"].shape[0] != token_ids.shape[0]: raise ValueError( f"Found attention_mask of length - {feature_extractor_output['attention_mask'].shape[0]} but token_ids of length - {token_ids.shape[0]}" ) else: # if there is no attention mask present then it's surely a single example if ( feature_extractor_output["beatsteps"].shape[0] != 1 or feature_extractor_output["extrapolated_beatstep"].shape[0] != 1 ): raise ValueError( "Length mistamtch of beatsteps and extrapolated_beatstep! Since attention_mask is not present the number of examples must be 1, " f"But found beatsteps length - {feature_extractor_output['beatsteps'].shape[0]}, extrapolated_beatsteps length - {feature_extractor_output['extrapolated_beatstep'].shape[0]}." 
) if attention_masks_present: # check for zeros (since token_ids are separated by zero arrays) batch_idx = np.where(feature_extractor_output["attention_mask"][:, 0] == 0)[0] else: batch_idx = [token_ids.shape[0]] notes_list = [] pretty_midi_objects_list = [] start_idx = 0 for index, end_idx in enumerate(batch_idx): each_tokens_ids = token_ids[start_idx:end_idx] # check where the whole example ended by searching for eos_token_id and getting the upper bound each_tokens_ids = each_tokens_ids[:, : np.max(np.where(each_tokens_ids == int(self.eos_token))[1]) + 1] beatsteps = feature_extractor_output["beatsteps"][index] extrapolated_beatstep = feature_extractor_output["extrapolated_beatstep"][index] # if attention mask is present then mask out real array/tensor if attention_masks_present: attention_mask_beatsteps = feature_extractor_output["attention_mask_beatsteps"][index] attention_mask_extrapolated_beatstep = feature_extractor_output[ "attention_mask_extrapolated_beatstep" ][index] beatsteps = beatsteps[: np.max(np.where(attention_mask_beatsteps == 1)[0]) + 1] extrapolated_beatstep = extrapolated_beatstep[ : np.max(np.where(attention_mask_extrapolated_beatstep == 1)[0]) + 1 ] each_tokens_ids = to_numpy(each_tokens_ids) beatsteps = to_numpy(beatsteps) extrapolated_beatstep = to_numpy(extrapolated_beatstep) pretty_midi_object = self.relative_batch_tokens_ids_to_midi( tokens=each_tokens_ids, beatstep=extrapolated_beatstep, bars_per_batch=self.num_bars, cutoff_time_idx=(self.num_bars + 1) * 4, ) for note in pretty_midi_object.instruments[0].notes: note.start += beatsteps[0] note.end += beatsteps[0] notes_list.append(note) pretty_midi_objects_list.append(pretty_midi_object) start_idx += end_idx + 1 # 1 represents the zero array if return_midi: return BatchEncoding({"notes": notes_list, "pretty_midi_objects": pretty_midi_objects_list}) return BatchEncoding({"notes": notes_list})
class_definition
2,014
32,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/tokenization_pop2piano.py
null
8,652
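The record above defines the full notes-to-token_ids round trip for Pop2Piano. A minimal encoding sketch, assuming the `sweetcocoa/pop2piano` checkpoint (named in the config docstring further down) also hosts this tokenizer's vocab file and that `pretty_midi` is installed:

```python
import numpy as np
from transformers import Pop2PianoTokenizer

# Assumption: the hub checkpoint bundles the tokenizer vocab file.
tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")

# Each row is [onset idx, offset idx, pitch, velocity], as described in `encode_plus`.
notes = np.array(
    [
        [0, 2, 60, 90],
        [1, 3, 64, 80],
        [2, 4, 67, 75],
    ]
)

encoding = tokenizer(notes)
print(encoding["token_ids"])  # interleaved TIME / VELOCITY / NOTE token ids
```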
class Pop2PianoProcessor(ProcessorMixin): r""" Constructs a Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single processor. [`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`]. See the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information. Args: feature_extractor (`Pop2PianoFeatureExtractor`): An instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Pop2PianoTokenizer`): An instance of [`Pop2PianoTokenizer`]. The tokenizer is a required input. """ attributes = ["feature_extractor", "tokenizer"] feature_extractor_class = "Pop2PianoFeatureExtractor" tokenizer_class = "Pop2PianoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__( self, audio: Union[np.ndarray, List[float], List[np.ndarray]] = None, sampling_rate: Union[int, List[int]] = None, steps_per_beat: int = 2, resample: Optional[bool] = True, notes: Union[List, TensorType] = None, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, verbose: bool = True, **kwargs, ) -> Union[BatchFeature, BatchEncoding]: """ This method uses the [`Pop2PianoFeatureExtractor.__call__`] method to prepare log-mel-spectrograms for the model, and the [`Pop2PianoTokenizer.__call__`] method to prepare token_ids from notes. Please refer to the docstring of the above two methods for more information. """ # Since Feature Extractor needs both audio and sampling_rate and tokenizer needs both token_ids and # feature_extractor_output, we must check for both. if (audio is None and sampling_rate is None) and (notes is None): raise ValueError( "You have to specify at least audio and sampling_rate in order to use the feature extractor or " "notes to use the tokenizer part." ) if audio is not None and sampling_rate is not None: inputs = self.feature_extractor( audio=audio, sampling_rate=sampling_rate, steps_per_beat=steps_per_beat, resample=resample, **kwargs, ) if notes is not None: encoded_token_ids = self.tokenizer( notes=notes, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) if notes is None: return inputs elif audio is None or sampling_rate is None: return encoded_token_ids else: inputs["token_ids"] = encoded_token_ids["token_ids"] return inputs def batch_decode( self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool = True, ) -> BatchEncoding: """ This method uses the [`Pop2PianoTokenizer.batch_decode`] method to convert model generated token_ids to midi_notes. Please refer to the docstring of the above two methods for more information.
""" return self.tokenizer.batch_decode( token_ids=token_ids, feature_extractor_output=feature_extractor_output, return_midi=return_midi ) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names)) def save_pretrained(self, save_directory, **kwargs): if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) return super().save_pretrained(save_directory, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(*args)
class_definition
934
5,523
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/processing_pop2piano.py
null
8,653
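The processor dispatches to the feature extractor for the audio path and to the tokenizer for the notes path. A sketch of the notes path only, again assuming the `sweetcocoa/pop2piano` checkpoint bundles both components; the audio path additionally needs the audio backends (`essentia`, `librosa`, `scipy`, `pretty_midi`):

```python
import numpy as np
from transformers import Pop2PianoProcessor

# Assumption: the checkpoint hosts both the feature extractor config and the tokenizer vocab.
processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")

notes = np.array([[0, 2, 60, 90], [1, 3, 64, 80]])

# With only `notes` given, `__call__` returns the tokenizer output;
# passing `audio=...` and `sampling_rate=...` instead (or as well) runs the feature extractor.
token_ids = processor(notes=notes)
print(token_ids["token_ids"])
```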
class Pop2PianoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 2400): Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`]. composer_vocab_size (`int`, *optional*, defaults to 21): Denotes the number of composers. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `Pop2PianoBlock`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`. 
""" model_type = "pop2piano" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=2400, composer_vocab_size=21, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", # noqa is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, dense_act_fn="relu", **kwargs, ): self.vocab_size = vocab_size self.composer_vocab_size = composer_vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache self.dense_act_fn = dense_act_fn self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated" self.hidden_size = self.d_model self.num_attention_heads = num_heads self.num_hidden_layers = num_layers super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
class_definition
786
5,926
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pop2piano/configuration_pop2piano.py
null
8,654
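The config's `__init__` above derives a few attributes rather than taking them directly: `num_decoder_layers` falls back to `num_layers`, `is_gated_act` comes from the `feed_forward_proj` prefix, and `hidden_size`/`num_attention_heads`/`num_hidden_layers` alias the T5-style names. A quick check of that behaviour:

```python
from transformers import Pop2PianoConfig

config = Pop2PianoConfig(num_layers=4, feed_forward_proj="gated-gelu")

print(config.num_decoder_layers)             # 4 (defaults to num_layers)
print(config.is_gated_act)                   # True ("gated-" prefix)
print(config.hidden_size == config.d_model)  # True
print(config.num_attention_heads)            # 8
```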
class MBart50Tokenizer(PreTrainedTokenizer): """ Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Examples: ```python >>> from transformers import MBart50Tokenizer >>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO") >>> src_text = " UN Chief Says There Is No Military Solution in Syria" >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") >>> # model(**model_inputs) should work ```""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or [] kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} super().__init__( src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def vocab_size(self) -> int: return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__(self) -> Dict: state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d: Dict) -> None: self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def get_vocab(self) -> Dict: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" if index in 
self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `[src_lang_code] X [eos]` - `labels`: (for decoder) `[tgt_lang_code] X [eos]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang: str) -> None: """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos].""" self.cur_lang_code_id = self.lang_code_to_id[src_lang] self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos].""" self.cur_lang_code_id = self.lang_code_to_id[tgt_lang] self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id]
class_definition
1,518
16,306
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mbart50/tokenization_mbart50.py
null
8,655
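Every encoded sequence is wrapped as `[src_lang_code] X [eos]`, and encoding targets switches the prefix to the target-language code. A short sketch using the `facebook/mbart-large-50` checkpoint from the docstring example (requires `sentencepiece`):

```python
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")

ids = tokenizer("UN Chief Says There Is No Military Solution in Syria")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids[0]))  # 'en_XX' prefix from set_src_lang_special_tokens
print(ids[-1] == tokenizer.eos_token_id)        # True, eos suffix

# Encoding targets switches to the target-language prefix via `_switch_to_target_mode`.
labels = tokenizer(text_target="Şeful ONU declară că nu există o soluţie militară în Siria")["input_ids"]
print(tokenizer.convert_ids_to_tokens(labels[0]))  # 'ro_RO'
```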
class MBart50TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MBART tokenizer for mBART-50 (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. Examples: ```python >>> from transformers import MBart50TokenizerFast >>> tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO") >>> src_text = " UN Chief Says There Is No Military Solution in Syria" >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") >>> # model(**model_inputs) should work ```""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = MBart50Tokenizer prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file=None, src_lang=None, tgt_lang=None, tokenizer_file=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ): # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or [] kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( vocab_file, src_lang=src_lang, tgt_lang=tgt_lang, tokenizer_file=tokenizer_file, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs, ) self.vocab_file = vocab_file self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "en_XX" self.tgt_lang = tgt_lang self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] self.set_src_lang_special_tokens(self._src_lang) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART-50 sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `[src_lang_code] X [eos]` - `labels`: (for decoder) `[tgt_lang_code] X [eos]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang: str) -> None: """Reset the special tokens to the source lang setting. 
prefix=[src_lang_code] and suffix=[eos].""" self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang) self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos].""" self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang) self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
class_definition
1,717
11,593
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mbart50/tokenization_mbart50_fast.py
null
8,656
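The fast variant applies the same `[lang_code] X [eos]` wrapping through a `TemplateProcessing` post-processor that is rebuilt whenever `src_lang` (or the target mode) changes. A sketch, again assuming the `facebook/mbart-large-50` checkpoint:

```python
from transformers import MBart50TokenizerFast

tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")

batch = tokenizer(["Hello world", "How are you?"], padding=True)
en_xx_id = tokenizer.convert_tokens_to_ids("en_XX")
print(all(ids[0] == en_xx_id for ids in batch["input_ids"]))  # True

# Changing src_lang rebuilds the post-processor, so new encodings pick up the new prefix.
tokenizer.src_lang = "de_DE"
print(tokenizer("Hallo Welt")["input_ids"][0] == tokenizer.convert_tokens_to_ids("de_DE"))  # True
```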
class TFRobertaPreLayerNormEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) self.padding_idx = 1 self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: tf.Tensor Returns: tf.Tensor """ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + self.padding_idx def call( self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False, ): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids( input_ids=input_ids, past_key_values_length=past_key_values_length ) else: position_ids = tf.expand_dims( tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings
class_definition
2,187
6,396
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,657
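The embeddings layer can be exercised on its own: position ids are derived from the input ids so that padding (id 1, the `padding_idx`) never advances the position counter. A minimal sketch that builds the layer directly from the internal module path in the record above, with a deliberately tiny config:

```python
import tensorflow as tf
from transformers import RobertaPreLayerNormConfig
from transformers.models.roberta_prelayernorm.modeling_tf_roberta_prelayernorm import (
    TFRobertaPreLayerNormEmbeddings,
)

config = RobertaPreLayerNormConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64)
embeddings = TFRobertaPreLayerNormEmbeddings(config, name="embeddings")

input_ids = tf.constant([[5, 6, 7, 1, 1]])  # trailing 1s are padding
outputs = embeddings(input_ids=input_ids)
print(outputs.shape)  # (1, 5, 32)
```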
class TFRobertaPreLayerNormPooler(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
class_definition
6,499
7,498
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,658
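The pooler is just a tanh-activated dense projection of the first token's hidden state. A minimal sketch with a tiny config:

```python
import tensorflow as tf
from transformers import RobertaPreLayerNormConfig
from transformers.models.roberta_prelayernorm.modeling_tf_roberta_prelayernorm import (
    TFRobertaPreLayerNormPooler,
)

config = RobertaPreLayerNormConfig(hidden_size=32)
pooler = TFRobertaPreLayerNormPooler(config, name="pooler")

hidden_states = tf.random.normal((2, 7, config.hidden_size))
pooled = pooler(hidden_states)
print(pooled.shape)  # (2, 32): one vector per sequence, taken from position 0
```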
class TFRobertaPreLayerNormSelfAttention(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFRobertaPreLayerNormModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size])
class_definition
7,608
14,470
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,659
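A minimal sketch of the head-splitting layout change performed by `transpose_for_scores` in the self-attention class above. The tensor sizes below are illustrative (they are not read from any checkpoint); only `tensorflow` is assumed.

```python
import tensorflow as tf

# Illustrative sizes: 12 heads of size 64, i.e. all_head_size = 768.
batch_size, seq_length = 2, 5
num_attention_heads, attention_head_size = 12, 64
all_head_size = num_attention_heads * attention_head_size

hidden = tf.random.normal((batch_size, seq_length, all_head_size))

# [batch, seq, all_head_size] -> [batch, seq, heads, head_size]
split = tf.reshape(hidden, (batch_size, -1, num_attention_heads, attention_head_size))
# -> [batch, heads, seq, head_size], so each head attends over the sequence independently.
per_head_layout = tf.transpose(split, perm=[0, 2, 1, 3])

print(per_head_layout.shape)  # (2, 12, 5, 64)
```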
class TFRobertaPreLayerNormSelfOutput(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
class_definition
14,473
15,515
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,660
class TFRobertaPreLayerNormAttention(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFRobertaPreLayerNormSelfAttention(config, name="self") self.dense_output = TFRobertaPreLayerNormSelfOutput(config, name="output") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention.prune_heads def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: hidden_states_pre_layer_norm = self.LayerNorm(inputs=input_tensor) self_outputs = self.self_attention( hidden_states=hidden_states_pre_layer_norm, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size])
class_definition
15,518
17,910
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,661
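The attention block above normalizes its input (`self.LayerNorm(inputs=input_tensor)`) before running self-attention and adds the raw residual afterwards, which is the defining "pre-LayerNorm" change relative to vanilla RoBERTa/BERT. Below is a toy sketch of the two orderings, assuming nothing beyond `tensorflow`/Keras; the dense layer is a stand-in for the attention sublayer, not the library code.

```python
import tensorflow as tf
from tensorflow import keras

layer_norm = keras.layers.LayerNormalization(epsilon=1e-5)
dense = keras.layers.Dense(16)  # stand-in for attention + output projection

def post_ln_block(x):
    # vanilla RoBERTa/BERT ordering: normalize after the residual add
    return layer_norm(x + dense(x))

def pre_ln_block(x):
    # ordering used by this model: normalize the sublayer input, keep the residual raw
    return x + dense(layer_norm(x))

x = tf.random.normal((2, 4, 16))
print(pre_ln_block(x).shape)  # (2, 4, 16)
```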
class TFRobertaPreLayerNormIntermediate(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.LayerNorm(inputs=hidden_states) hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
class_definition
17,913
19,318
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,662
class TFRobertaPreLayerNormOutput(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size])
class_definition
19,321
20,365
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,663
class TFRobertaPreLayerNormLayer(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.attention = TFRobertaPreLayerNormAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFRobertaPreLayerNormAttention(config, name="crossattention") self.intermediate = TFRobertaPreLayerNormIntermediate(config, name="intermediate") self.bert_output = TFRobertaPreLayerNormOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): 
self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None)
class_definition
20,467
25,286
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,664
class TFRobertaPreLayerNormEncoder(keras.layers.Layer): def __init__(self, config: RobertaPreLayerNormConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFRobertaPreLayerNormLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None)
class_definition
25,390
28,516
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,665
class TFRobertaPreLayerNormMainLayer(keras.layers.Layer): config_class = RobertaPreLayerNormConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.encoder = TFRobertaPreLayerNormEncoder(config, name="encoder") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.pooler = TFRobertaPreLayerNormPooler(config, name="pooler") if add_pooling_layer else None # The embeddings must be the last declaration in order to follow the weights order self.embeddings = TFRobertaPreLayerNormEmbeddings(config, name="embeddings") def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape( extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values[0] is not None: # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length] extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if self.is_decoder and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.LayerNorm(inputs=sequence_output) pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None)
class_definition
28,539
38,962
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,666
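A small sketch of the attention-mask arithmetic the main layer performs above: a 2D padding mask is broadcast to 4D and turned into an additive mask (0 for attended positions, -10000 for padding), and in decoder mode it is first combined with a lower-triangular causal mask. Shapes are toy values; only `tensorflow` is assumed.

```python
import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])            # [batch, seq]; 0 marks padding

# Encoder path: broadcast to [batch, 1, 1, seq] and make it additive.
extended = tf.reshape(attention_mask, (1, 1, 1, 4))
extended = (1.0 - extended) * -10000.0                          # 0.0 where attended, -10000.0 where masked

# Decoder path: combine a lower-triangular causal mask with the padding mask first.
seq_ids = tf.range(4)
causal = tf.cast(seq_ids[None, None, :] <= seq_ids[None, :, None], tf.float32)  # [1, seq, seq]
combined = causal * attention_mask[:, None, :]                  # [batch, seq, seq]

print(extended.shape, combined.shape)                           # (1, 1, 1, 4) (1, 4, 4)
```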
class TFRobertaPreLayerNormPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm"
class_definition
39,116
39,416
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,667
class TFRobertaPreLayerNormModel(TFRobertaPreLayerNormPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm") @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation """ outputs = self.roberta_prelayernorm( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None)
class_definition
45,535
49,524
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,668
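A usage sketch for the base model. It assumes network access and uses `andreasmadsen/efficient_mlm_m0.40`, the checkpoint referenced elsewhere in these files; any RoBERTa-PreLayerNorm checkpoint with a compatible tokenizer would be used the same way.

```python
from transformers import AutoTokenizer, TFRobertaPreLayerNormModel

checkpoint = "andreasmadsen/efficient_mlm_m0.40"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFRobertaPreLayerNormModel.from_pretrained(checkpoint)

inputs = tokenizer("Hello world", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
```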
class TFRobertaPreLayerNormLMHead(keras.layers.Layer): """RobertaPreLayerNorm Head for masked language modeling.""" def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.act = get_tf_activation("gelu") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, value): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.layer_norm(hidden_states) # project back to size of vocabulary with bias seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states
class_definition
49,639
52,081
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,669
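A toy sketch of the tied-weight projection the LM head above performs: hidden states are multiplied against the input embedding matrix (`transpose_b=True`) and an output-only bias is added. Sizes are illustrative and the tensors are random; only `tensorflow` is assumed.

```python
import tensorflow as tf

vocab_size, hidden_size = 10, 4
embedding_matrix = tf.random.normal((vocab_size, hidden_size))  # shared with the input embeddings
bias = tf.zeros((vocab_size,))                                  # output-only bias per token

hidden_states = tf.random.normal((2, 3, hidden_size))           # [batch, seq, hidden]
flat = tf.reshape(hidden_states, (-1, hidden_size))
logits = tf.matmul(flat, embedding_matrix, transpose_b=True)    # [batch * seq, vocab]
logits = tf.reshape(logits, (2, 3, vocab_size)) + bias

print(logits.shape)  # (2, 3, 10)
```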
class TFRobertaPreLayerNormForMaskedLM(TFRobertaPreLayerNormPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.__init__ with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer( config, add_pooling_layer=False, name="roberta_prelayernorm" ) self.lm_head = TFRobertaPreLayerNormLMHead(config, self.roberta_prelayernorm.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.69, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.call with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None)
class_definition
52,224
56,450
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,670
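A fill-mask usage sketch for the masked-LM head, assuming network access and the same checkpoint as above; per the docstring metadata in the class, the top prediction for this prompt is expected to be close to " Paris".

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaPreLayerNormForMaskedLM

checkpoint = "andreasmadsen/efficient_mlm_m0.40"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFRobertaPreLayerNormForMaskedLM.from_pretrained(checkpoint)

inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
logits = model(**inputs).logits

# Locate the <mask> position and take the highest-scoring vocabulary entry there.
mask_index = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
predicted_id = tf.argmax(logits[0, mask_index])
print(tokenizer.decode([int(predicted_id)]))  # expected to be close to " Paris"
```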
class TFRobertaPreLayerNormForCausalLM(TFRobertaPreLayerNormPreTrainedModel, TFCausalLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] def __init__(self, config: RobertaPreLayerNormConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning( "If you want to use `TFRobertaPreLayerNormLMHeadModel` as a standalone, add `is_decoder=True.`" ) self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer( config, add_pooling_layer=False, name="roberta_prelayernorm" ) self.lm_head = TFRobertaPreLayerNormLMHead( config, input_embeddings=self.roberta_prelayernorm.embeddings, name="lm_head" ) def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = tf.ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ outputs = self.roberta_prelayernorm( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.lm_head(hidden_states=sequence_output, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None)
class_definition
56,630
63,488
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,671
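A toy sketch of the label shift used for the causal-LM loss in `call` above: the logits at position i are scored against the token at position i + 1. The loss function here is a plain Keras stand-in for `hf_compute_loss`, and the tensors are random.

```python
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])            # [batch, seq]
logits = tf.random.normal((1, 4, 50))               # [batch, seq, vocab]

shifted_logits = logits[:, :-1]                     # drop the last position's logits
shifted_labels = labels[:, 1:]                      # drop the first token

loss = tf.keras.losses.sparse_categorical_crossentropy(
    shifted_labels, shifted_logits, from_logits=True
)
print(loss.shape)  # (1, 3): one loss term per predicted next token
```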
class TFRobertaPreLayerNormClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, features, training=False): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x, training=training) x = self.dense(x) x = self.dropout(x, training=training) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size])
class_definition
63,615
65,181
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,672
class TFRobertaPreLayerNormForSequenceClassification( TFRobertaPreLayerNormPreTrainedModel, TFSequenceClassificationLoss ): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer( config, add_pooling_layer=False, name="roberta_prelayernorm" ) self.classifier = TFRobertaPreLayerNormClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification.call with roberta->roberta_prelayernorm def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None)
class_definition
65,435
69,101
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,673
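A usage sketch for the sequence-classification head, assuming network access and the checkpoint above; `num_labels=2` is an arbitrary choice, and the classification head is freshly initialized, so the logits only become meaningful after fine-tuning.

```python
from transformers import AutoTokenizer, TFRobertaPreLayerNormForSequenceClassification

checkpoint = "andreasmadsen/efficient_mlm_m0.40"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFRobertaPreLayerNormForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

inputs = tokenizer("A short example sentence.", return_tensors="tf")
logits = model(**inputs).logits
print(logits.shape)  # (1, 2)
```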
class TFRobertaPreLayerNormForMultipleChoice(TFRobertaPreLayerNormPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None outputs = self.roberta_prelayernorm( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
class_definition
69,546
73,891
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,674
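A toy sketch of the flatten-then-reshape trick used by the multiple-choice head above: the choices are folded into the batch dimension before the encoder runs, and the per-choice scores are reshaped back to `[batch, num_choices]` for the loss. Shapes are illustrative; only `tensorflow` is assumed.

```python
import tensorflow as tf

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = tf.zeros((batch_size, num_choices, seq_length), dtype=tf.int32)

flat_input_ids = tf.reshape(input_ids, (-1, seq_length))             # (8, 7) fed to the encoder
per_choice_score = tf.random.normal((batch_size * num_choices, 1))   # stand-in for the classifier output
reshaped_logits = tf.reshape(per_choice_score, (-1, num_choices))    # (2, 4) scored against the label

print(flat_input_ids.shape, reshaped_logits.shape)
```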
class TFRobertaPreLayerNormForTokenClassification(TFRobertaPreLayerNormPreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer( config, add_pooling_layer=False, name="roberta_prelayernorm" ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification.call with roberta->roberta_prelayernorm def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
class_definition
74,152
78,076
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,675
class TFRobertaPreLayerNormForQuestionAnswering(TFRobertaPreLayerNormPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer( config, add_pooling_layer=False, name="roberta_prelayernorm" ) self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering.call with roberta->roberta_prelayernorm def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta_prelayernorm", None) is not None: with tf.name_scope(self.roberta_prelayernorm.name): self.roberta_prelayernorm.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size])
class_definition
78,395
83,039
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py
null
8,676
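A toy sketch of how the question-answering head above turns its two-channel output into start and end logits, and how span indices could then be read off with `argmax`. Tensors are random; only `tensorflow` is assumed.

```python
import tensorflow as tf

logits = tf.random.normal((2, 10, 2))                    # [batch, seq, 2] from qa_outputs
start_logits, end_logits = tf.split(logits, 2, axis=-1)  # two [batch, seq, 1] tensors
start_logits = tf.squeeze(start_logits, axis=-1)         # [batch, seq]
end_logits = tf.squeeze(end_logits, axis=-1)

start_index = tf.argmax(start_logits, axis=-1)
end_index = tf.argmax(end_logits, axis=-1)
print(start_index.numpy(), end_index.numpy())
```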
class RobertaPreLayerNormConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`] or a [`TFRobertaPreLayerNormModel`]. It is used to instantiate a RoBERTa-PreLayerNorm model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa-PreLayerNorm [andreasmadsen/efficient_mlm_m0.40](https://huggingface.co/andreasmadsen/efficient_mlm_m0.40) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Examples: ```python >>> from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel >>> # Initializing a RoBERTa-PreLayerNorm configuration >>> configuration = RobertaPreLayerNormConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = RobertaPreLayerNormModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "roberta-prelayernorm" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout
class_definition
1,159
7,228
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py
null
8,677
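A minimal usage sketch for the configuration record above: it builds a deliberately tiny, randomly initialized model for quick CPU-only experiments. The reduced hyperparameter values are arbitrary illustration choices, not recommended settings.

```python
from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel

# Deliberately tiny configuration for fast experimentation (values are arbitrary).
config = RobertaPreLayerNormConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=128,
    max_position_embeddings=64,
)
model = RobertaPreLayerNormModel(config)

print(config.to_json_string())                     # serialized hyperparameters
print(sum(p.numel() for p in model.parameters()))  # parameter count of the random model
```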
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
class_definition
7,347
7,807
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py
null
8,678
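The `inputs` property above only switches the dynamic-axis layout on the task. The standalone helper below (a hypothetical name, not part of the library) reproduces that mapping so it can be inspected without the ONNX export machinery.

```python
from collections import OrderedDict
from typing import Dict, Mapping

def dynamic_axes_for_task(task: str) -> Mapping[str, Dict[int, str]]:
    # Multiple-choice inputs are rank-3 (batch, choice, sequence); everything else is rank-2.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(dynamic_axes_for_task("sequence-classification"))
print(dynamic_axes_for_task("multiple-choice"))
```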
class RobertaPreLayerNormEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape)
class_definition
1,997
6,189
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,679
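The embeddings above call `create_position_ids_from_input_ids`, which is defined elsewhere in the same file. The sketch below (suffixed `_sketch` to mark it as an illustration) shows the padding-aware scheme the docstring describes: padded tokens keep `padding_idx` as their position, while real tokens count up from `padding_idx + 1`.

```python
import torch

def create_position_ids_from_input_ids_sketch(input_ids, padding_idx, past_key_values_length=0):
    # Non-padding tokens receive consecutive positions starting at padding_idx + 1;
    # padding tokens keep position padding_idx, so their position embedding stays constant.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 31, 52, 2, 1, 1]])  # 1 is the pad token id
print(create_position_ids_from_input_ids_sketch(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]])
```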
class RobertaPreLayerNormSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaPreLayerNormModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs
class_definition
6,294
13,666
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,680
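A shape-level sketch of the tensor bookkeeping inside the self-attention above: `transpose_for_scores` turns `(batch, seq, hidden)` into `(batch, heads, seq, head_dim)`, scores are scaled by `sqrt(head_dim)`, and the per-head context is merged back. The query/key/value projections, relative position embeddings, masking, dropout and the cache are omitted, and the sizes are arbitrary.

```python
import math
import torch

batch, seq, hidden, num_heads = 2, 5, 64, 4
head_dim = hidden // num_heads
hidden_states = torch.randn(batch, seq, hidden)

def split_heads(t):
    # (batch, seq, hidden) -> (batch, num_heads, seq, head_dim), as in transpose_for_scores
    return t.view(batch, seq, num_heads, head_dim).permute(0, 2, 1, 3)

q, k, v = split_heads(hidden_states), split_heads(hidden_states), split_heads(hidden_states)
scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_dim)  # (batch, heads, seq, seq)
probs = torch.softmax(scores, dim=-1)
context = torch.matmul(probs, v)                                     # (batch, heads, seq, head_dim)
context = context.permute(0, 2, 1, 3).reshape(batch, seq, hidden)    # merge heads back
print(context.shape)  # torch.Size([2, 5, 64])
```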
class RobertaPreLayerNormSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + input_tensor
        return hidden_states
class_definition
13,669
14,189
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,681
class RobertaPreLayerNormAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = RobertaPreLayerNormSelfAttention(config, position_embedding_type=position_embedding_type) self.output = RobertaPreLayerNormSelfOutput(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: hidden_states_pre_layer_norm = self.LayerNorm(hidden_states) self_outputs = self.self( hidden_states_pre_layer_norm, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
14,192
16,550
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,682
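The defining difference from the post-LayerNorm RoBERTa attention block is visible in the `forward` above: LayerNorm is applied to the input of the sub-layer and the raw residual is added afterwards. A toy contrast of the two orderings, with an `nn.Linear` standing in for the attention sub-layer:

```python
import torch
from torch import nn

hidden_size = 8
layer_norm = nn.LayerNorm(hidden_size)
sublayer = nn.Linear(hidden_size, hidden_size)  # stand-in for the attention sub-layer
x = torch.randn(2, 4, hidden_size)

post_ln = layer_norm(x + sublayer(x))  # post-LN (original BERT/RoBERTa ordering)
pre_ln = x + sublayer(layer_norm(x))   # pre-LN (this model): normalize first, then add the residual
print(post_ln.shape, pre_ln.shape)
```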
class RobertaPreLayerNormIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class_definition
16,553
17,272
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,683
class RobertaPreLayerNormOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + input_tensor
        return hidden_states
class_definition
17,275
17,797
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,684
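`RobertaPreLayerNormIntermediate` and `RobertaPreLayerNormOutput` together form the pre-LN feed-forward sub-block: LayerNorm, expand to `intermediate_size`, activation, project back, then a residual add. A shape-level sketch with arbitrary sizes and dropout omitted:

```python
import torch
from torch import nn

hidden_size, intermediate_size = 16, 64
layer_norm = nn.LayerNorm(hidden_size)
dense_in = nn.Linear(hidden_size, intermediate_size)   # Intermediate.dense
dense_out = nn.Linear(intermediate_size, hidden_size)  # Output.dense
act = nn.GELU()

x = torch.randn(2, 5, hidden_size)
ffn_out = x + dense_out(act(dense_in(layer_norm(x))))  # residual added around the whole sub-block
print(ffn_out.shape)  # torch.Size([2, 5, 16])
```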
class RobertaPreLayerNormLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RobertaPreLayerNormAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = RobertaPreLayerNormAttention(config, position_embedding_type="absolute") self.intermediate = RobertaPreLayerNormIntermediate(config) self.output = RobertaPreLayerNormOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class_definition
17,894
21,876
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,685
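In the layer above, `feed_forward_chunk` is run through `apply_chunking_to_forward`, which slices the sequence dimension into chunks of `chunk_size_feed_forward` to cap peak memory. The helper below is a standalone re-implementation of that idea (not the library utility itself):

```python
import torch

def chunked_feed_forward(forward_fn, chunk_size, seq_len_dim, hidden_states):
    # chunk_size == 0 means "no chunking"; otherwise run the feed-forward on slices
    # along the sequence dimension and concatenate the results.
    if chunk_size == 0:
        return forward_fn(hidden_states)
    chunks = hidden_states.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)

feed_forward = torch.nn.Linear(16, 16)
x = torch.randn(2, 10, 16)
assert torch.allclose(chunked_feed_forward(feed_forward, 3, 1, x), feed_forward(x), atol=1e-6)
```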
class RobertaPreLayerNormEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RobertaPreLayerNormLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, )
class_definition
21,975
25,795
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,686
class RobertaPreLayerNormPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class_definition
25,862
26,436
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,687
class RobertaPreLayerNormPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm" supports_gradient_checkpointing = True _no_split_modules = ["RobertaPreLayerNormEmbeddings", "RobertaPreLayerNormSelfAttention"] # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
26,439
27,774
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,688
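`_init_weights` above is invoked once per sub-module during `post_init()`. The sketch below mirrors its three branches and shows how such an initializer can be applied over an arbitrary module tree with `nn.Module.apply`; it is an illustration, not the library routine.

```python
import torch
from torch import nn

def init_weights(module, initializer_range=0.02):
    # Same three cases as _init_weights: Linear, Embedding, LayerNorm.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

toy = nn.Sequential(nn.Embedding(10, 8, padding_idx=0), nn.Linear(8, 8), nn.LayerNorm(8))
toy.apply(init_weights)  # nn.Module.apply walks every sub-module
```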
class RobertaPreLayerNormModel(RobertaPreLayerNormPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RobertaPreLayerNormEmbeddings(config) self.encoder = RobertaPreLayerNormEncoder(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = RobertaPreLayerNormPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.LayerNorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, )
class_definition
31,706
41,108
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,689
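A usage sketch for the bare encoder above, using the checkpoint named in the configuration docstring. Note that `last_hidden_state` has already passed through the model-level final LayerNorm that pre-LN architectures require, and `hidden_states` contains the embedding output plus one entry per layer.

```python
import torch
from transformers import AutoTokenizer, RobertaPreLayerNormModel

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = RobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer("Pre-LayerNorm puts LayerNorm before each sub-layer.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size), already LayerNorm-ed
print(outputs.pooler_output.shape)      # (1, hidden_size)
print(len(outputs.hidden_states))       # num_hidden_layers + 1
```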
class RobertaPreLayerNormForCausalLM(RobertaPreLayerNormPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning( "If you want to use `RobertaPreLayerNormLMHeadModel` as a standalone, add `is_decoder=True.`" ) self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) self.lm_head = RobertaPreLayerNormLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Returns: Example: ```python >>> from transformers import AutoTokenizer, RobertaPreLayerNormForCausalLM, AutoConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40") >>> config = AutoConfig.from_pretrained("andreasmadsen/efficient_mlm_m0.40") >>> config.is_decoder = True >>> model = RobertaPreLayerNormForCausalLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(prediction_scores.device) # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
class_definition
41,555
48,404
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,690
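The causal-LM loss above shifts logits and labels by one position so that each score predicts the next token. A self-contained sketch of that bookkeeping with random tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, batch, seq = 11, 2, 6
prediction_scores = torch.randn(batch, seq, vocab_size)
labels = torch.randint(0, vocab_size, (batch, seq))

# The score at position t is trained against the token at position t + 1, so drop
# the last time step of the logits and the first time step of the labels.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()
lm_loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(lm_loss.item())
```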
class RobertaPreLayerNormForMaskedLM(RobertaPreLayerNormPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] # Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.__init__ with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `RobertaPreLayerNormForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) self.lm_head = RobertaPreLayerNormLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.69, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.forward with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(prediction_scores.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
48,547
52,906
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,691
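For masked language modeling the labels are not shifted; instead every unmasked position carries the label `-100`, which `CrossEntropyLoss` ignores by default, so only the masked tokens contribute to the loss. A minimal sketch:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
prediction_scores = torch.randn(2, 5, vocab_size)

labels = torch.full((2, 5), -100, dtype=torch.long)  # -100 = "ignore this position"
labels[0, 2] = 7                                     # only masked positions get real token ids
labels[1, 4] = 3

masked_lm_loss = CrossEntropyLoss()(prediction_scores.view(-1, vocab_size), labels.view(-1))
print(masked_lm_loss.item())  # averaged over the two supervised positions only
```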
class RobertaPreLayerNormLMHead(nn.Module):
    """RobertaPreLayerNorm Head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)

        # project back to size of vocabulary with bias
        x = self.decoder(x)

        return x

    def _tie_weights(self):
        # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
        # For accelerate compatibility and to not break backward compatibility
        if self.decoder.bias.device.type == "meta":
            self.decoder.bias = self.bias
        else:
            self.bias = self.decoder.bias
class_definition
53,016
54,102
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,692
class RobertaPreLayerNormForSequenceClassification(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) self.classifier = RobertaPreLayerNormClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification.forward with roberta->roberta_prelayernorm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
54,356
58,496
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,693
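The sequence-classification forward pass infers `problem_type` from `num_labels` and the label dtype, then picks MSE, cross-entropy, or BCE-with-logits accordingly. The helper below (a hypothetical name) condenses that branching so it can be exercised in isolation:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits, labels, num_labels, problem_type=None):
    # Mirrors the problem_type inference in the forward pass above.
    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"
    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)

logits = torch.randn(4, 3)
print(classification_loss(logits, torch.tensor([0, 2, 1, 1]), num_labels=3))           # single-label
print(classification_loss(logits, torch.randint(0, 2, (4, 3)).float(), num_labels=3))  # multi-label
```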
class RobertaPreLayerNormForMultipleChoice(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.roberta_prelayernorm = RobertaPreLayerNormModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta_prelayernorm( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
58,936
62,738
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,694
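Multiple-choice inputs arrive as `(batch, num_choices, seq_len)`; each candidate is encoded independently by folding the choice axis into the batch axis, and the per-candidate scores are reshaped back for a softmax over choices. A shape-only sketch with a random stand-in for the pooled encoder output:

```python
import torch

batch, num_choices, seq_len, hidden = 2, 4, 7, 16
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (batch * num_choices, seq_len)
pooled_output = torch.randn(batch * num_choices, hidden)  # stand-in for outputs[1]
classifier = torch.nn.Linear(hidden, 1)

reshaped_logits = classifier(pooled_output).view(-1, num_choices)  # (batch, num_choices)
print(flat_input_ids.shape, reshaped_logits.shape)
```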
class RobertaPreLayerNormForTokenClassification(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.forward with roberta->roberta_prelayernorm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
62,998
66,218
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,695
class RobertaPreLayerNormClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
class_definition
66,340
67,125
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,696
class RobertaPreLayerNormForQuestionAnswering(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.forward with roberta->roberta_prelayernorm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
67,443
71,907
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
null
8,697
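The question-answering head projects every token to two logits, splits them into start/end score vectors, clamps out-of-range gold positions to `sequence_length`, and ignores exactly those clamped positions via `ignore_index`. A compact sketch of that loss computation with random tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, hidden = 2, 10, 16
sequence_output = torch.randn(batch, seq_len, hidden)
qa_outputs = torch.nn.Linear(hidden, 2)

logits = qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)  # (batch, seq_len)
end_logits = end_logits.squeeze(-1)

start_positions = torch.tensor([1, 15]).clamp(0, seq_len)  # 15 lies outside the sequence...
end_positions = torch.tensor([3, 15]).clamp(0, seq_len)
loss_fct = CrossEntropyLoss(ignore_index=seq_len)          # ...so it is clamped to seq_len and ignored
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss.item())
```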
class FlaxRobertaPreLayerNormEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states
class_definition
6,208
8,059
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,698
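The Flax embeddings module above sums word, position, and token-type embeddings, then applies LayerNorm and dropout. The sketch below drives the module in isolation; the tiny config values, dummy ids, and the direct (non-public) import from the modeling file are assumptions made for illustration only.

# Hedged sketch: initialize and run the embeddings module above on dummy inputs.
import jax
import jax.numpy as jnp
from transformers import RobertaPreLayerNormConfig
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
    FlaxRobertaPreLayerNormEmbeddings,
)

# Toy config values chosen only to keep the example small.
config = RobertaPreLayerNormConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64, type_vocab_size=1)
module = FlaxRobertaPreLayerNormEmbeddings(config=config, dtype=jnp.float32)

batch_size, seq_len = 2, 8
input_ids = jnp.ones((batch_size, seq_len), dtype="i4")
token_type_ids = jnp.zeros((batch_size, seq_len), dtype="i4")
position_ids = jnp.broadcast_to(jnp.arange(seq_len, dtype="i4"), (batch_size, seq_len))
attention_mask = jnp.ones((batch_size, seq_len), dtype="i4")  # accepted by __call__ but unused here

# deterministic=True (the default) disables dropout, so only a params RNG is needed.
variables = module.init(jax.random.PRNGKey(0), input_ids, token_type_ids, position_ids, attention_mask)
hidden_states = module.apply(variables, input_ids, token_type_ids, position_ids, attention_mask)
print(hidden_states.shape)  # (2, 8, 32)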
class FlaxRobertaPreLayerNormSelfAttention(nn.Module):
    config: RobertaPreLayerNormConfig
    causal: bool = False
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.head_dim = self.config.hidden_size // self.config.num_attention_heads
        if self.config.hidden_size % self.config.num_attention_heads != 0:
            raise ValueError(
                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
                f"`config.num_attention_heads`: {self.config.num_attention_heads}"
            )

        self.query = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.key = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.value = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        if self.causal:
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))

    @nn.compact
    # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to
        cached states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those
            # key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        key_value_states: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic=True,
        output_attentions: bool = False,
    ):
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.query(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.key(key_value_states)
            value_states = self.value(key_value_states)
        else:
            # self_attention
            key_states = self.key(hidden_states)
            value_states = self.value(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache / prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attention_probs_dropout_prob,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs
class_definition
8,173
16,094
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,699
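The self-attention module above projects hidden states to per-head queries, keys, and values, optionally applies a causal mask and an autoregressive key/value cache, converts the boolean mask into an additive bias, and computes scaled dot-product attention before merging the heads back into the hidden dimension. The sketch below runs a single non-causal forward pass on dummy tensors; the small config values and the direct (non-public) import from the modeling file are assumptions for illustration.

# Hedged sketch: a non-causal forward pass through the self-attention module above.
import jax
import jax.numpy as jnp
from transformers import RobertaPreLayerNormConfig
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
    FlaxRobertaPreLayerNormSelfAttention,
)

# hidden_size must be divisible by num_attention_heads; here head_dim = 32 / 4 = 8.
config = RobertaPreLayerNormConfig(hidden_size=32, num_attention_heads=4)
attention = FlaxRobertaPreLayerNormSelfAttention(config=config, causal=False, dtype=jnp.float32)

batch_size, seq_len = 2, 8
hidden_states = jnp.ones((batch_size, seq_len, config.hidden_size), dtype=jnp.float32)
attention_mask = jnp.ones((batch_size, seq_len), dtype="i4")

# deterministic defaults to True, so no dropout RNG is required.
variables = attention.init(jax.random.PRNGKey(0), hidden_states, attention_mask, layer_head_mask=None)
(attn_output,) = attention.apply(variables, hidden_states, attention_mask, layer_head_mask=None)
print(attn_output.shape)  # (2, 8, 32): heads are merged back into hidden_size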