Dataset columns:
  text          string  (lengths 31 to 243k)
  type          string  (1 distinct value)
  start         int64   (36 to 275k)
  end           int64   (286 to 280k)
  depth         int64   (0 to 1)
  filepath      string  (lengths 85 to 188)
  parent_class  string  (3 distinct values)
  class_index   int64   (0 to 10.8k)
class SpeechT5ForSpeechToSpeech(SpeechT5PreTrainedModel): def __init__(self, config: SpeechT5Config): super().__init__(config) speech_encoder = SpeechT5EncoderWithSpeechPrenet(config) speech_decoder = SpeechT5DecoderWithSpeechPrenet(config) self.speecht5 = SpeechT5Model(config, speech_encoder, speech_decoder) self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.speecht5.get_encoder() def get_decoder(self): return self.speecht5.get_decoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.get_encoder().prenet.freeze_feature_encoder() @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqSpectrogramOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_values: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, stop_labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, Seq2SeqSpectrogramOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details. decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`): Float values of input mel spectrogram. SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see `past_key_values`). speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*): Float values of target mel spectrogram. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`] for details. Returns: Example: ```python >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan, set_seed >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset( ... "hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True ... 
) # doctest: +IGNORE_RESULT >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc") >>> model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc") >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") >>> # audio file is decoded on the fly >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file >>> set_seed(555) # make deterministic >>> # generate speech >>> speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder) >>> speech.shape torch.Size([77824]) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_values is None: decoder_input_values, decoder_attention_mask = shift_spectrograms_right( labels, self.config.reduction_factor, decoder_attention_mask ) outputs = self.speecht5( input_values=input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, use_cache=use_cache, speaker_embeddings=speaker_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) _, spectrogram, logits = self.speech_decoder_postnet(outputs[0]) loss = None if not return_dict: output = (spectrogram,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSpectrogramOutput( loss=loss, spectrogram=spectrogram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @torch.no_grad() def generate_speech( self, input_values: torch.FloatTensor, speaker_embeddings: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, threshold: float = 0.5, minlenratio: float = 0.0, maxlenratio: float = 20.0, vocoder: Optional[nn.Module] = None, output_cross_attentions: bool = False, return_output_lengths: bool = False, ) -> torch.FloatTensor: r""" Converts a raw speech waveform into a sequence of mel spectrograms, which are subsequently turned back into a speech waveform using a vocoder. Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) threshold (`float`, *optional*, defaults to 0.5): The generated sequence ends when the predicted stop token probability exceeds this value. minlenratio (`float`, *optional*, defaults to 0.0): Used to calculate the minimum required length for the output sequence. maxlenratio (`float`, *optional*, defaults to 20.0): Used to calculate the maximum allowed length for the output sequence. vocoder (`nn.Module`, *optional*, defaults to `None`): The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel spectrogram. output_cross_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of the decoder's cross-attention layers. return_output_lengths (`bool`, *optional*, defaults to `False`): Whether or not to return the concrete spectrogram/waveform lengths. Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the inputs: - when `return_output_lengths` is False - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram. - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(num_frames,)` -- The predicted speech waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. - when `return_output_lengths` is True - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that are padded to the maximum length. - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `List[Int]` -- A list of all the concrete lengths for each spectrogram. - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length. - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `List[Int]` -- A list of all the concrete lengths for each waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. """ if speaker_embeddings is None: speaker_embeddings = torch.zeros((1, 512), device=input_values.device) return _generate_speech( self, input_values, speaker_embeddings, attention_mask, threshold, minlenratio, maxlenratio, vocoder, output_cross_attentions, return_output_lengths, )
type: class_definition | start: 134,225 | end: 146,417 | depth: 0 | parent_class: null | class_index: 8,500 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/modeling_speecht5.py
class HifiGanResidualBlock(nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
        super().__init__()
        self.leaky_relu_slope = leaky_relu_slope

        self.convs1 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=dilation[i],
                    padding=self.get_padding(kernel_size, dilation[i]),
                )
                for i in range(len(dilation))
            ]
        )
        self.convs2 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=1,
                    padding=self.get_padding(kernel_size, 1),
                )
                for _ in range(len(dilation))
            ]
        )

    def get_padding(self, kernel_size, dilation=1):
        return (kernel_size * dilation - dilation) // 2

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        for layer in self.convs1:
            weight_norm(layer)
        for layer in self.convs2:
            weight_norm(layer)

    def remove_weight_norm(self):
        for layer in self.convs1:
            nn.utils.remove_weight_norm(layer)
        for layer in self.convs2:
            nn.utils.remove_weight_norm(layer)

    def forward(self, hidden_states):
        for conv1, conv2 in zip(self.convs1, self.convs2):
            residual = hidden_states
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv1(hidden_states)
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv2(hidden_states)
            hidden_states = hidden_states + residual
        return hidden_states
type: class_definition | start: 147,308 | end: 149,437 | depth: 0 | parent_class: null | class_index: 8,501 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/modeling_speecht5.py
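For context, here is a minimal smoke-test sketch for the residual block defined above. It assumes the `HifiGanResidualBlock` class (and its `torch.nn` import) is in scope, e.g. copied from the snippet; the channel count and input length are arbitrary illustration values.

```python
import torch

# Hypothetical example values: 64 channels, 100 time steps.
block = HifiGanResidualBlock(channels=64, kernel_size=3, dilation=(1, 3, 5))

x = torch.randn(2, 64, 100)   # (batch, channels, time)
y = block(x)                  # dilated convs with residual connections
assert y.shape == x.shape     # padding is chosen so the block preserves shape
```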
class SpeechT5HifiGan(PreTrainedModel): config_class = SpeechT5HifiGanConfig main_input_name = "spectrogram" def __init__(self, config: SpeechT5HifiGanConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.num_upsamples = len(config.upsample_rates) self.conv_pre = nn.Conv1d( config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3, ) self.upsampler = nn.ModuleList() for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): self.upsampler.append( nn.ConvTranspose1d( config.upsample_initial_channel // (2**i), config.upsample_initial_channel // (2 ** (i + 1)), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2, ) ) self.resblocks = nn.ModuleList() for i in range(len(self.upsampler)): channels = config.upsample_initial_channel // (2 ** (i + 1)) for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) self.register_buffer("mean", torch.zeros(config.model_in_dim)) self.register_buffer("scale", torch.ones(config.model_in_dim)) # Initialize weights and apply final processing self.post_init() def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv_pre) for layer in self.upsampler: weight_norm(layer) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor: r""" Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech waveform. Args: spectrogram (`torch.FloatTensor`): Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`. Returns: `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`. 
""" if self.config.normalize_before: spectrogram = (spectrogram - self.mean) / self.scale is_batched = spectrogram.dim() == 3 if not is_batched: spectrogram = spectrogram.unsqueeze(0) hidden_states = spectrogram.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for i in range(self.num_upsamples): hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope) hidden_states = self.upsampler[i](hidden_states) res_state = self.resblocks[i * self.num_kernels](hidden_states) for j in range(1, self.num_kernels): res_state += self.resblocks[i * self.num_kernels + j](hidden_states) hidden_states = res_state / self.num_kernels hidden_states = nn.functional.leaky_relu(hidden_states) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) if not is_batched: # remove batch dim and collapse tensor to 1-d audio waveform waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1) else: # remove seq-len dim since this collapses to 1 waveform = hidden_states.squeeze(1) return waveform
type: class_definition | start: 149,523 | end: 154,466 | depth: 0 | parent_class: null | class_index: 8,502 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/modeling_speecht5.py
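A short usage sketch for the vocoder class above, using the public `microsoft/speecht5_hifigan` checkpoint that also appears in the SpeechT5 docstrings; the spectrogram here is random noise purely to illustrate shapes.

```python
import torch
from transformers import SpeechT5HifiGan

vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Un-batched log-mel spectrogram: (sequence_length, config.model_in_dim)
spectrogram = torch.randn(140, vocoder.config.model_in_dim)
with torch.no_grad():
    waveform = vocoder(spectrogram)  # 1-D waveform of shape (num_frames,)

# Each spectrogram frame is upsampled by the product of config.upsample_rates samples.
print(waveform.shape)
```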
class SpeechT5Tokenizer(PreTrainedTokenizer): """ Construct a SpeechT5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. normalize (`bool`, *optional*, defaults to `False`): Whether to convert numeric quantities in the text to their spelt-out english counterparts. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", normalize=False, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.normalize = normalize self._normalizer = None self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, normalize=normalize, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): normalize = kwargs.pop("normalize", self.normalize) if is_split_into_words: text = " " + text if normalize: text = self.normalizer(text) return (text, kwargs) @property def vocab_size(self): return self.sp_model.get_piece_size() @property def normalizer(self): if self._normalizer is None: self._normalizer = EnglishNumberNormalizer() return self._normalizer @normalizer.setter def normalizer(self, value): self._normalizer = value def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) 
suffix_ones = [1] if token_ids_1 is None: return ([0] * len(token_ids_0)) + suffix_ones return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
type: class_definition | start: 1,042 | end: 8,911 | depth: 0 | parent_class: null | class_index: 8,503 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/tokenization_speecht5.py
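A brief usage sketch for the tokenizer above; `microsoft/speecht5_tts` is one of the public checkpoints that ships this SentencePiece vocabulary.

```python
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")

encoded = tokenizer("Hello, my dog is cute.", return_tensors="pt")
print(encoded.input_ids)                       # SentencePiece ids, terminated by </s>
print(tokenizer.decode(encoded.input_ids[0]))  # round-trip back to text
```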
class SpeechT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SpeechT5Model`]. It is used to instantiate a SpeechT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SpeechT5 [microsoft/speecht5_asr](https://huggingface.co/microsoft/speecht5_asr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 81): Vocabulary size of the SpeechT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method of [`SpeechT5Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. encoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. encoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. encoder_layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer decoder. decoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder. decoder_layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. positional_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the text position encoding layers. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in the speech encoder pre-net. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. 
feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the speech encoder pre-net. feat_extract_activation (`str, `optional`, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the speech encoder pre-net. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the speech encoder pre-net. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the speech encoder pre-net. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the speech encoder pre-net. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2),: The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0),: The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_mel_bins (`int`, *optional*, defaults to 80): Number of mel features used per input features. Used by the speech decoder pre-net. Should correspond to the value used in the [`SpeechT5Processor`] class. speech_decoder_prenet_layers (`int`, *optional*, defaults to 2): Number of layers in the speech decoder pre-net. speech_decoder_prenet_units (`int`, *optional*, defaults to 256): Dimensionality of the layers in the speech decoder pre-net. speech_decoder_prenet_dropout (`float`, *optional*, defaults to 0.5): The dropout probability for the speech decoder pre-net layers. speaker_embedding_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. speech_decoder_postnet_layers (`int`, *optional*, defaults to 5): Number of layers in the speech decoder post-net. speech_decoder_postnet_units (`int`, *optional*, defaults to 256): Dimensionality of the layers in the speech decoder post-net. speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5): Number of convolutional filter channels in the speech decoder post-net. speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5): The dropout probability for the speech decoder post-net layers. reduction_factor (`int`, *optional*, defaults to 2): Spectrogram length reduction factor for the speech decoder inputs. max_speech_positions (`int`, *optional*, defaults to 4000): The maximum sequence length of speech features that this model might ever be used with. max_text_positions (`int`, *optional*, defaults to 450): The maximum sequence length of text features that this model might ever be used with. encoder_max_relative_position (`int`, *optional*, defaults to 160): Maximum distance for relative position embedding in the encoder. use_guided_attention_loss (`bool`, *optional*, defaults to `True`): Whether to apply guided attention loss while training the TTS model. guided_attention_loss_num_heads (`int`, *optional*, defaults to 2): Number of attention heads the guided attention loss will be applied to. Use -1 to apply this loss to all attention heads. guided_attention_loss_sigma (`float`, *optional*, defaults to 0.4): Standard deviation for guided attention loss. guided_attention_loss_scale (`float`, *optional*, defaults to 10.0): Scaling coefficient for guided attention loss (also known as lambda). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Example: ```python >>> from transformers import SpeechT5Model, SpeechT5Config >>> # Initializing a "microsoft/speecht5_asr" style configuration >>> configuration = SpeechT5Config() >>> # Initializing a model (with random weights) from the "microsoft/speecht5_asr" style configuration >>> model = SpeechT5Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "speecht5" attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"} def __init__( self, vocab_size=81, hidden_size=768, encoder_layers=12, encoder_attention_heads=12, encoder_ffn_dim=3072, encoder_layerdrop=0.1, decoder_layers=6, decoder_ffn_dim=3072, decoder_attention_heads=12, decoder_layerdrop=0.1, hidden_act="gelu", positional_dropout=0.1, hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-5, scale_embedding=False, feat_extract_norm="group", feat_proj_dropout=0.0, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, decoder_start_token_id=2, num_mel_bins=80, speech_decoder_prenet_layers=2, speech_decoder_prenet_units=256, speech_decoder_prenet_dropout=0.5, speaker_embedding_dim=512, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, speech_decoder_postnet_dropout=0.5, reduction_factor=2, max_speech_positions=4000, max_text_positions=450, encoder_max_relative_position=160, use_guided_attention_loss=True, guided_attention_loss_num_heads=2, guided_attention_loss_sigma=0.4, guided_attention_loss_scale=10.0, use_cache=True, is_encoder_decoder=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_layers = encoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.decoder_layerdrop = decoder_layerdrop self.hidden_act = hidden_act self.positional_dropout = positional_dropout self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.scale_embedding = scale_embedding self.feat_extract_norm = feat_extract_norm self.feat_proj_dropout = feat_proj_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks self.num_mel_bins = num_mel_bins self.speech_decoder_prenet_layers = speech_decoder_prenet_layers self.speech_decoder_prenet_units = speech_decoder_prenet_units self.speech_decoder_prenet_dropout = speech_decoder_prenet_dropout self.speaker_embedding_dim = speaker_embedding_dim self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout self.reduction_factor = reduction_factor self.max_speech_positions = max_speech_positions self.max_text_positions = max_text_positions self.encoder_max_relative_position = encoder_max_relative_position self.use_guided_attention_loss = use_guided_attention_loss self.guided_attention_loss_num_heads = guided_attention_loss_num_heads self.guided_attention_loss_sigma = guided_attention_loss_sigma self.guided_attention_loss_scale = guided_attention_loss_scale self.use_cache = use_cache self.is_encoder_decoder = is_encoder_decoder super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
type: class_definition | start: 864 | end: 18,967 | depth: 0 | parent_class: null | class_index: 8,504 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/configuration_speecht5.py
class SpeechT5HifiGanConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SpeechT5HifiGanModel`]. It is used to instantiate a SpeechT5 HiFi-GAN vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SpeechT5 [microsoft/speecht5_hifigan](https://huggingface.co/microsoft/speecht5_hifigan) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_dim (`int`, *optional*, defaults to 80): The number of frequency bins in the input log-mel spectrogram. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the output audio will be generated, expressed in hertz (Hz). upsample_initial_channel (`int`, *optional*, defaults to 512): The number of input channels into the upsampling network. upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[4, 4, 4, 4]`): A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The length of *upsample_rates* defines the number of convolutional layers and has to match the length of *upsample_kernel_sizes*. upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 8, 8]`): A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of *upsample_rates*. resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field fusion (MRF) module. resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the multi-receptive field fusion (MRF) module. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.1): The angle of the negative slope used by the leaky ReLU activation. normalize_before (`bool`, *optional*, defaults to `True`): Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance. 
Example: ```python >>> from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig >>> # Initializing a "microsoft/speecht5_hifigan" style configuration >>> configuration = SpeechT5HifiGanConfig() >>> # Initializing a model (with random weights) from the "microsoft/speecht5_hifigan" style configuration >>> model = SpeechT5HifiGan(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "hifigan" def __init__( self, model_in_dim=80, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[4, 4, 4, 4], upsample_kernel_sizes=[8, 8, 8, 8], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs, ): self.model_in_dim = model_in_dim self.sampling_rate = sampling_rate self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope self.normalize_before = normalize_before super().__init__(**kwargs)
type: class_definition | start: 18,970 | end: 23,377 | depth: 0 | parent_class: null | class_index: 8,505 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speecht5/configuration_speecht5.py
class Qwen2VLCausalLMOutputWithPast(ModelOutput): """ Base class for Qwen2VL causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None
type: class_definition | start: 2,180 | end: 4,642 | depth: 0 | parent_class: null | class_index: 8,506 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class Qwen2VLRotaryEmbedding(nn.Module): def __init__( self, dim=None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, rope_type="default", config: Optional[Qwen2VLConfig] = None, ): super().__init__() # TODO (joao): remove the `if` below, only used for BC self.rope_kwargs = {} if config is None: logger.warning_once( "`Qwen2VLRotaryEmbedding` can now be fully parameterized by passing the model config through the " "`config` argument. All other arguments will be removed in v4.46" ) self.rope_kwargs = { "rope_type": rope_type, "factor": scaling_factor, "dim": dim, "base": base, "max_position_embeddings": max_position_embeddings, } self.rope_type = rope_type self.max_seq_len_cached = max_position_embeddings self.original_max_seq_len = max_position_embeddings else: # BC: "rope_type" was originally "type" if config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn( self.config, device, seq_len=seq_len, **self.rope_kwargs ) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block. In contrast to other models, Qwen2_VL has different position ids for thw grids # So we expand the inv_freq to shape (3, ...) inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
type: class_definition | start: 4,645 | end: 8,807 | depth: 0 | parent_class: null | class_index: 8,507 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class VisionRotaryEmbedding(nn.Module):
    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, seqlen: int) -> torch.Tensor:
        seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        return freqs
type: class_definition | start: 12,401 | end: 12,902 | depth: 0 | parent_class: null | class_index: 8,508 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
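A minimal sketch of the rotary frequency table produced by the module above, assuming the class (and its `torch` import) is in scope. The value `dim=40` is an illustrative assumption, standing in for `head_dim // 2` of the vision attention heads.

```python
import torch

rope = VisionRotaryEmbedding(dim=40)   # dim // 2 = 20 inverse frequencies
freqs = rope(seqlen=16)
print(freqs.shape)                     # torch.Size([16, 20]): one angle per (position, frequency) pair
```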
class PatchEmbed(nn.Module):
    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        in_channels: int = 3,
        embed_dim: int = 1152,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.in_channels = in_channels
        self.embed_dim = embed_dim

        kernel_size = [temporal_patch_size, patch_size, patch_size]
        self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        target_dtype = self.proj.weight.dtype
        hidden_states = hidden_states.view(
            -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
        )
        hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
        return hidden_states
type: class_definition | start: 12,905 | end: 13,870 | depth: 0 | parent_class: null | class_index: 8,509 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
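A shape-level sketch of the 3D patch embedding above, assuming the class is in scope. Each input row holds one flattened patch with `in_channels * temporal_patch_size * patch_size * patch_size` values; the patch count and defaults below are illustrative.

```python
import torch

embed = PatchEmbed(patch_size=14, temporal_patch_size=2, in_channels=3, embed_dim=1152)

# 64 flattened patches, each with 3 * 2 * 14 * 14 = 1176 values (illustrative numbers).
pixel_patches = torch.randn(64, 3 * 2 * 14 * 14)
tokens = embed(pixel_patches)
print(tokens.shape)  # torch.Size([64, 1152]): one embedding vector per patch
```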
class PatchMerger(nn.Module):
    def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
        super().__init__()
        self.hidden_size = context_dim * (spatial_merge_size**2)
        self.ln_q = LayerNorm(context_dim, eps=1e-6)
        self.mlp = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.GELU(),
            nn.Linear(self.hidden_size, dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.mlp(self.ln_q(x).view(-1, self.hidden_size))
        return x
type: class_definition | start: 13,873 | end: 14,444 | depth: 0 | parent_class: null | class_index: 8,510 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
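A sketch of the merge step above: groups of `spatial_merge_size**2` neighbouring patch embeddings are concatenated and projected to the language-model width. It assumes `PatchMerger` and its `LayerNorm` import (torch.nn.LayerNorm) are in scope; the dimensions are illustrative.

```python
import torch

# context_dim: vision hidden size; dim: language-model hidden size (illustrative values).
# With spatial_merge_size=2, every 4 patch tokens fold into a single token.
merger = PatchMerger(dim=3584, context_dim=1152, spatial_merge_size=2)

patch_tokens = torch.randn(64, 1152)   # 64 patch embeddings
merged = merger(patch_tokens)
print(merged.shape)                    # torch.Size([16, 3584]): 64 / 4 merged tokens
```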
class VisionMlp(nn.Module):
    def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None:
        super().__init__()
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.act = ACT2FN[hidden_act]
        self.fc2 = nn.Linear(hidden_dim, dim)

    def forward(self, x) -> torch.Tensor:
        return self.fc2(self.act(self.fc1(x)))
type: class_definition | start: 14,447 | end: 14,797 | depth: 0 | parent_class: null | class_index: 8,511 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
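A tiny sketch of the vision MLP above, assuming the class is defined in a script where transformers' `ACT2FN` activation table is imported; the width and `"quick_gelu"` activation are illustrative choices.

```python
import torch
from transformers.activations import ACT2FN  # needed by VisionMlp if defined in this script

mlp = VisionMlp(dim=1280, hidden_dim=int(1280 * 4), hidden_act="quick_gelu")
x = torch.randn(10, 1280)
print(mlp(x).shape)  # torch.Size([10, 1280]): same width in, same width out
```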
class VisionAttention(nn.Module):
    def __init__(self, dim: int, num_heads: int = 16) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=True)
        self.proj = nn.Linear(dim, dim)

    def forward(
        self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None
    ) -> torch.Tensor:
        seq_length = hidden_states.shape[0]
        q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
        q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
        k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)

        attention_mask = torch.full(
            [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype
        )
        for i in range(1, len(cu_seqlens)):
            attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0

        q = q.transpose(0, 1)
        k = k.transpose(0, 1)
        v = v.transpose(0, 1)
        attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.head_dim)
        attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
        attn_output = torch.matmul(attn_weights, v)
        attn_output = attn_output.transpose(0, 1)
        attn_output = attn_output.reshape(seq_length, -1)
        attn_output = self.proj(attn_output)
        return attn_output
type: class_definition | start: 14,800 | end: 16,421 | depth: 0 | parent_class: null | class_index: 8,512 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class VisionFlashAttention2(nn.Module):
    def __init__(self, dim: int, num_heads: int = 16) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=True)
        self.proj = nn.Linear(dim, dim)

    def forward(
        self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None
    ) -> torch.Tensor:
        seq_length = hidden_states.shape[0]
        q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
        q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
        k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)

        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
        attn_output = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen).reshape(
            seq_length, -1
        )
        attn_output = self.proj(attn_output)
        return attn_output
type: class_definition | start: 16,424 | end: 17,443 | depth: 0 | parent_class: null | class_index: 8,513 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class VisionSdpaAttention(nn.Module):
    def __init__(self, dim: int, num_heads: int = 16) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=True)
        self.proj = nn.Linear(dim, dim)

    def forward(
        self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None
    ) -> torch.Tensor:
        seq_length = hidden_states.shape[0]
        q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
        q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
        k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)

        attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool)
        for i in range(1, len(cu_seqlens)):
            attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True
        q = q.transpose(0, 1)
        k = k.transpose(0, 1)
        v = v.transpose(0, 1)
        attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0)
        attn_output = attn_output.transpose(0, 1)
        attn_output = attn_output.reshape(seq_length, -1)
        attn_output = self.proj(attn_output)
        return attn_output
type: class_definition | start: 17,446 | end: 18,791 | depth: 0 | parent_class: null | class_index: 8,514 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class Qwen2VLVisionBlock(nn.Module):
    def __init__(self, config, attn_implementation: str = "sdpa") -> None:
        super().__init__()
        self.norm1 = LayerNorm(config.embed_dim, eps=1e-6)
        self.norm2 = LayerNorm(config.embed_dim, eps=1e-6)
        mlp_hidden_dim = int(config.embed_dim * config.mlp_ratio)

        self.attn = QWEN2_VL_VISION_ATTENTION_CLASSES[attn_implementation](
            config.embed_dim, num_heads=config.num_heads
        )
        self.mlp = VisionMlp(dim=config.embed_dim, hidden_dim=mlp_hidden_dim, hidden_act=config.hidden_act)

    def forward(self, hidden_states, cu_seqlens, rotary_pos_emb) -> torch.Tensor:
        hidden_states = hidden_states + self.attn(
            self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb
        )
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
        return hidden_states
type: class_definition | start: 18,947 | end: 19,862 | depth: 0 | parent_class: null | class_index: 8,515 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class Qwen2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Qwen2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
type: class_definition | start: 19,933 | end: 20,653 | depth: 0 | parent_class: null | class_index: 8,516 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
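A small numerical sketch of the RMS norm above, assuming the class is in scope: it rescales each hidden vector by the reciprocal of its root-mean-square, with no mean subtraction (unlike LayerNorm). The sizes are illustrative.

```python
import torch

norm = Qwen2RMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 4, 8)   # (batch, seq_len, hidden_size)
y = norm(x)

# With the default all-ones weight, every output vector has RMS close to 1.
rms = y.pow(2).mean(dim=-1).sqrt()
print(rms)
```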
class Qwen2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
class_definition
20,720
21,388
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,517
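Qwen2MLP is the usual gated feed-forward block: two parallel up-projections, an activation on the gate branch (the config default for `hidden_act` is `"silu"`), an element-wise product, and a down-projection. A stand-alone sketch with toy dimensions (the sizes here are illustrative, not the real config values):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_size, intermediate_size = 16, 44  # toy sizes for illustration
gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(2, 7, hidden_size)                  # (batch, seq, hidden)
out = down_proj(F.silu(gate_proj(x)) * up_proj(x))  # gate * up, then project back down
print(out.shape)                                    # torch.Size([2, 7, 16])
```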
class Qwen2VLAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". """ def __init__(self, config: Qwen2VLConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead " "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.attention_dropout = config.attention_dropout self.rope_scaling = config.rope_scaling if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = Qwen2VLRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # Fix precision issues in 
Qwen2-VL float16 inference # Replace inf values with zeros in attention weights to prevent NaN propagation if query_states.dtype == torch.float16: attn_weights = torch.where(torch.isinf(attn_weights), torch.zeros_like(attn_weights), attn_weights) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value
class_definition
22,065
27,413
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,518
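The forward pass above calls a `repeat_kv` helper (defined elsewhere in the modeling file) to expand the key/value heads for grouped-query attention, so that each group of query heads sees its own copy of the shared KV head. A stand-alone sketch of the expected behavior, written here as an illustration rather than the library's own implementation:

```python
import torch

def repeat_kv_sketch(kv: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Expand (batch, num_kv_heads, seq, head_dim) to (batch, num_kv_heads * n_rep, seq, head_dim)."""
    batch, num_kv_heads, seq_len, head_dim = kv.shape
    if n_rep == 1:
        return kv
    kv = kv[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return kv.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)

# Toy GQA setup: 8 query heads sharing 2 key/value heads -> groups of 4.
key_states = torch.randn(1, 2, 5, 16)
expanded = repeat_kv_sketch(key_states, n_rep=4)
print(expanded.shape)                               # torch.Size([1, 8, 5, 16])
print(torch.equal(expanded[0, 0], expanded[0, 3]))  # True: heads in a group share the same KV
```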
class Qwen2VLFlashAttention2(Qwen2VLAttention): """ Qwen2VL flash attention module, following Qwen2VL attention module. This module inherits from `Qwen2VLAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom config.max_window_layers layers. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ): bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) # Because the input can be padded, the absolute sequence length depends on the max position id. cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." 
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reshape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) if ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): sliding_window = self.config.sliding_window else: sliding_window = None attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, sliding_window=sliding_window, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value
class_definition
27,416
32,776
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,519
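One detail worth isolating from the flash-attention forward above is how the sliding window is selected per layer: it is only handed to the kernel when sliding-window attention is enabled, a window size is configured, and the layer index is at or above `max_window_layers`. A small sketch of that per-layer decision with made-up config values (`ToyConfig` is a stand-in, not the real `Qwen2VLConfig`):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class ToyConfig:
    # Stand-in for the relevant Qwen2VLConfig fields; values are illustrative.
    use_sliding_window: bool = True
    sliding_window: Optional[int] = 4096
    max_window_layers: int = 28


def window_for_layer(config: ToyConfig, layer_idx: int) -> Optional[int]:
    # Mirrors the condition used in Qwen2VLFlashAttention2.forward above.
    if (
        config.use_sliding_window
        and config.sliding_window is not None
        and layer_idx >= config.max_window_layers
    ):
        return config.sliding_window
    return None


print([window_for_layer(ToyConfig(), i) for i in (0, 27, 28, 31)])  # [None, None, 4096, 4096]
```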
class Qwen2VLSdpaAttention(Qwen2VLAttention): """ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from Qwen2Attention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "Qwen2VLModel is using Qwen2VLSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. 
is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value
class_definition
32,779
37,491
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,520
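The SDPA variant delegates the score computation to `torch.nn.functional.scaled_dot_product_attention`, which should agree numerically with the eager softmax formulation used in `Qwen2VLAttention`. A quick sanity-check sketch on random tensors (no RoPE, no KV cache, causal masking only):

```python
import math

import torch
import torch.nn.functional as F

torch.manual_seed(0)
bsz, n_heads, q_len, head_dim = 1, 4, 6, 8
q = torch.randn(bsz, n_heads, q_len, head_dim)
k = torch.randn(bsz, n_heads, q_len, head_dim)
v = torch.randn(bsz, n_heads, q_len, head_dim)

# Eager path: QK^T / sqrt(d), causal mask, softmax, weighted sum over values.
scores = q @ k.transpose(-2, -1) / math.sqrt(head_dim)
causal = torch.full((q_len, q_len), float("-inf")).triu(1)
eager = torch.softmax(scores + causal, dim=-1) @ v

# SDPA path with is_causal=True should match up to floating-point error.
sdpa = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(torch.allclose(eager, sdpa, atol=1e-5))  # True
```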
class Qwen2VLDecoderLayer(nn.Module): def __init__(self, config: Qwen2VLConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size if config.use_sliding_window and config._attn_implementation != "flash_attention_2": logger.warning_once( f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " "unexpected results may be encountered." ) self.self_attn = QWEN2_VL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = Qwen2MLP(config) self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
class_definition
37,643
41,599
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,521
class Qwen2VLPreTrainedModel(PreTrainedModel): config_class = Qwen2VLConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Qwen2VLDecoderLayer", "Qwen2VLVisionBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv3d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
class_definition
42,629
43,523
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,522
class Qwen2VisionTransformerPretrainedModel(Qwen2VLPreTrainedModel): config_class = Qwen2VLVisionConfig _no_split_modules = ["Qwen2VLVisionBlock"] def __init__(self, config) -> None: super().__init__(config) self.spatial_merge_size = config.spatial_merge_size self.patch_embed = PatchEmbed( patch_size=config.patch_size, temporal_patch_size=config.temporal_patch_size, in_channels=config.in_channels, embed_dim=config.embed_dim, ) head_dim = config.embed_dim // config.num_heads self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) self.blocks = nn.ModuleList( [Qwen2VLVisionBlock(config, config._attn_implementation) for _ in range(config.depth)] ) self.merger = PatchMerger( dim=config.hidden_size, context_dim=config.embed_dim, spatial_merge_size=config.spatial_merge_size ) self.gradient_checkpointing = False def get_dtype(self) -> torch.dtype: return self.blocks[0].mlp.fc2.weight.dtype def get_device(self) -> torch.device: return self.blocks[0].mlp.fc2.weight.device def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor: hidden_states = self.patch_embed(hidden_states) rotary_pos_emb = self.rot_pos_emb(grid_thw) cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( dim=0, # Select dtype based on the following factors: # - FA2 requires that cu_seqlens_q must have dtype int32 # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw # See https://github.com/huggingface/transformers/pull/34852 for more information dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) for blk in self.blocks: if self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( blk.__call__, hidden_states, cu_seqlens, rotary_pos_emb ) else: hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb) return self.merger(hidden_states)
class_definition
43,526
47,118
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,523
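In the vision transformer's forward pass above, `cu_seqlens` is derived from `grid_thw`: each temporal slice contributes `h * w` patches, the counts are repeated over the temporal dimension, cumulated, and padded with a leading zero to form the block boundaries consumed by the attention layers. A small sketch with a made-up `grid_thw`:

```python
import torch
import torch.nn.functional as F

# Two inputs: an image (t=1) with a 4x6 patch grid and a video (t=2) with a 2x2 grid.
grid_thw = torch.tensor([[1, 4, 6], [2, 2, 2]])

patches_per_slice = grid_thw[:, 1] * grid_thw[:, 2]  # tensor([24, 4])
cu_seqlens = torch.repeat_interleave(patches_per_slice, grid_thw[:, 0]).cumsum(dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

print(cu_seqlens)  # tensor([ 0, 24, 28, 32], dtype=torch.int32)
```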
class Qwen2VLModel(Qwen2VLPreTrainedModel): def __init__(self, config: Qwen2VLConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Qwen2VLDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen2VLRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `3` is for temporal, height and width. 
if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) elif position_ids.dim() == 2: position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask with Phi3->Qwen2VL def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Qwen2VL. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if ( self.config._attn_implementation == "sdpa" and not (using_static_cache or using_sliding_window_cache) and not output_attentions ): if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] # SlidingWindowCache or StaticCache if using_sliding_window_cache or using_static_cache: target_length = past_key_values.get_max_cache_shape() # DynamicCache or no cache else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values, ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Qwen2VL def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, config: Qwen2VLConfig, past_key_values: Cache, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
config (`Qwen2VLConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) if config.sliding_window is not None: # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also # the check is needed to verify is current checkpoint was trained with sliding window or not if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length: sliding_attend_mask = torch.arange(target_length, device=device) <= ( cache_position.reshape(-1, 1) - config.sliding_window ) diagonal_attend_mask.bitwise_or_(sliding_attend_mask) causal_mask *= diagonal_attend_mask causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit if attention_mask.shape[-1] > target_length: attention_mask = attention_mask[:, :target_length] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
class_definition
47,268
61,041
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,524
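When no explicit `position_ids` are given, the text backbone above simply broadcasts the 1D `cache_position` onto the three M-RoPE axes (temporal, height, width), because for pure text all three coordinates advance in lockstep. A minimal sketch of that expansion with toy shapes:

```python
import torch

batch_size, seq_len = 2, 5
inputs_embeds = torch.randn(batch_size, seq_len, 8)  # toy embeddings
cache_position = torch.arange(seq_len)               # prefill: positions 0..seq_len-1

# Same expansion as in Qwen2VLModel.forward: shape (3, batch, seq) with identical axes.
position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
print(position_ids.shape)                             # torch.Size([3, 2, 5])
print(torch.equal(position_ids[0], position_ids[2]))  # True: text uses the same index on every axis
```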
class Qwen2VLForConditionalGeneration(Qwen2VLPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.visual = Qwen2VisionTransformerPretrainedModel._from_config(config.vision_config) self.model = Qwen2VLModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.rope_deltas = None # cache rope_deltas here # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def get_rope_index( self, input_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Calculate the 3D rope index based on image and video's temporal, height and width in LLM. Explanation: Each embedding sequence contains vision embedding and text embedding or just contains text embedding. For pure text embedding sequence, the rotary position embedding has no difference with mordern LLMs. Examples: input_ids: [T T T T T], here T is for text. temporal position_ids: [0, 1, 2, 3, 4] height position_ids: [0, 1, 2, 3, 4] width position_ids: [0, 1, 2, 3, 4] For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part and 1D rotary position embeddin for text part. Examples: Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches. input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] text temporal position_ids: [3, 4, 5, 6, 7] text height position_ids: [3, 4, 5, 6, 7] text width position_ids: [3, 4, 5, 6, 7] Here we calculate the text start position_ids as the max vision position_ids plus 1. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = self.config.vision_config.spatial_merge_size image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id vision_start_token_id = self.config.vision_start_token_id mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device ) image_index, video_index = 0, 0 for i, input_ids in enumerate(total_input_ids): input_ids = input_ids[attention_mask[i] == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = 
max_position_ids + 1 - attention_mask.shape[-1] else: position_ids = ( torch.arange(input_ids.shape[1], device=input_ids.device) .view(1, 1, -1) .expand(3, input_ids.shape[0], -1) ) mrope_position_deltas = torch.zeros( [input_ids.shape[0], 1], device=input_ids.device, dtype=input_ids.dtype, ) return position_ids, mrope_position_deltas @add_start_docstrings_to_model_forward(QWEN2_VL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Qwen2VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, Qwen2VLCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Qwen2VLForConditionalGeneration >>> model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") >>> messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos]) >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: inputs_embeds = self.model.embed_tokens(input_ids) if pixel_values is not None: pixel_values = pixel_values.type(self.visual.get_dtype()) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) n_image_tokens = (input_ids == self.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) image_mask = ( (input_ids == self.config.image_token_id) .unsqueeze(-1) .expand_as(inputs_embeds) .to(inputs_embeds.device) ) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype()) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == self.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}" ) video_mask = ( (input_ids == self.config.video_token_id) .unsqueeze(-1) .expand_as(inputs_embeds) .to(inputs_embeds.device) ) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) # if we get 4D attention mask we cannot calculate rope deltas anymore. 
TODO @raushan fixme if position_ids is None and (attention_mask is None or attention_mask.ndim == 2): # calculate RoPE index once per generation in the pre-fill stage only if (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None: position_ids, rope_deltas = self.get_rope_index( input_ids, image_grid_thw, video_grid_thw, attention_mask ) self.rope_deltas = rope_deltas # then use the prev pre-calculated rope-deltas to get the correct position ids else: batch_size, seq_length, _ = inputs_embeds.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=inputs_embeds.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) if cache_position is not None: # otherwise `deltas` is an int `0` delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Qwen2VLCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here if past_key_values is not None: if inputs_embeds is not None: # Exception 1 input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] if cache_position[0] != 0: pixel_values = None pixel_values_videos = None # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None} else: model_inputs = {"input_ids": input_ids, "inputs_embeds": None} if isinstance(past_key_values, StaticCache) and 
attention_mask.ndim == 2: if model_inputs["inputs_embeds"] is not None: batch_size, sequence_length, _ = inputs_embeds.shape device = inputs_embeds.device else: batch_size, sequence_length = input_ids.shape device = input_ids.device attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_cache_shape(), dtype=self.lm_head.weight.dtype, device=device, cache_position=cache_position, batch_size=batch_size, config=self.config, past_key_values=past_key_values, ) model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "attention_mask": attention_mask, "pixel_values": pixel_values, "pixel_values_videos": pixel_values_videos, "image_grid_thw": image_grid_thw, "video_grid_thw": video_grid_thw, "cache_position": cache_position, } ) return model_inputs
class_definition
66,265
87,175
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
null
8,525
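The worked example in the `get_rope_index` docstring (a video spanning 3 temporal, 2 height and 2 width patches, followed by 5 text tokens) can be re-derived with a few lines of indexing. This is an illustrative reconstruction of those position ids, not a call into the method itself:

```python
import torch

t, h, w = 3, 2, 2  # video grid after spatial merging
text_len = 5

# Vision part: every (t, h, w) patch gets its own 3D coordinate.
t_index = torch.arange(t).view(-1, 1).expand(-1, h * w).flatten()
h_index = torch.arange(h).view(1, -1, 1).expand(t, -1, w).flatten()
w_index = torch.arange(w).view(1, 1, -1).expand(t, h, -1).flatten()
vision_ids = torch.stack([t_index, h_index, w_index])

# Text part: 1D positions starting right after the largest vision index.
st_idx = vision_ids.max() + 1
text_ids = torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx

print(vision_ids[0].tolist())  # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
print(vision_ids[1].tolist())  # [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
print(vision_ids[2].tolist())  # [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
print(text_ids[0].tolist())    # [3, 4, 5, 6, 7]
```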
class Qwen2VLVisionConfig(PretrainedConfig): model_type = "qwen2_vl" base_config_key = "vision_config" def __init__( self, depth=32, embed_dim=1280, hidden_size=3584, hidden_act="quick_gelu", mlp_ratio=4, num_heads=16, in_channels=3, patch_size=14, spatial_merge_size=2, temporal_patch_size=2, **kwargs, ): super().__init__(**kwargs) self.depth = depth self.embed_dim = embed_dim self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_ratio = mlp_ratio self.num_heads = num_heads self.in_channels = in_channels self.patch_size = patch_size self.spatial_merge_size = spatial_merge_size self.temporal_patch_size = temporal_patch_size
class_definition
875
1,722
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py
null
8,526
class Qwen2VLConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2VLModel`]. It is used to instantiate a Qwen2-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 152064): Vocabulary size of the Qwen2VL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen2VLModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 29568): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 80): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 1000000.0): The base period of the RoPE embeddings. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention (SWA) window size. If not specified, will default to `4096`. max_window_layers (`int`, *optional*, defaults to 80): The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. vision_config (`Dict`, *optional*): The config for the visual encoder initialization. 
rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. 
Scaling factor applied to high frequency components of the RoPE ```python >>> from transformers import Qwen2VLForConditionalGeneration, Qwen2VLConfig >>> # Initializing a Qwen2VL style configuration >>> configuration = Qwen2VLConfig() >>> # Initializing a model from the Qwen2-VL-7B style configuration >>> model = Qwen2VLForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_vl" sub_configs = {"vision_config": Qwen2VLVisionConfig} keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Qwen2VL` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } def __init__( self, vocab_size=152064, hidden_size=8192, intermediate_size=29568, num_hidden_layers=80, num_attention_heads=64, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=32768, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, tie_word_embeddings=False, rope_theta=1000000.0, use_sliding_window=False, sliding_window=4096, max_window_layers=80, attention_dropout=0.0, vision_config=None, rope_scaling=None, **kwargs, ): if isinstance(vision_config, dict): self.vision_config = Qwen2VLVisionConfig(**vision_config) elif vision_config is None: self.vision_config = Qwen2VLVisionConfig() self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window self.max_window_layers = max_window_layers # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.rope_scaling = rope_scaling # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. # and change type from 'mrope' to 'default' because `mrope` does defeault RoPE calculations # one can set it to "linear"/"dynamic" etc. to have scaled RoPE # TODO: @raushan update config in the hub if self.rope_scaling is not None and "type" in self.rope_scaling: if self.rope_scaling["type"] == "mrope": self.rope_scaling["type"] = "default" self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self, ignore_keys={"mrope_section"}) super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class_definition
1,725
12,149
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py
null
8,527
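A short usage sketch for the `rope_scaling` normalization performed in `Qwen2VLConfig.__init__` above. The reduced model dimensions and the `mrope_section` split are illustrative values chosen here, not settings of any released checkpoint.

```python
from transformers import Qwen2VLConfig

# Hypothetical, smaller-than-default dimensions purely for illustration.
config = Qwen2VLConfig(
    hidden_size=1024,
    num_hidden_layers=4,
    num_attention_heads=16,
    num_key_value_heads=2,
    # mrope_section is sized to head_dim // 2 = (1024 // 16) // 2 = 32 here.
    rope_scaling={"type": "mrope", "mrope_section": [8, 12, 12]},
)

# The legacy "type" key is mirrored into "rope_type", and "mrope" is mapped to
# "default" because multimodal RoPE reuses the default rotary computation.
print(config.rope_scaling)
# {'type': 'default', 'mrope_section': [8, 12, 12], 'rope_type': 'default'}
```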
class Qwen2VLProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": False, }, }
class_definition
1,335
1,485
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
null
8,528
class Qwen2VLProcessor(ProcessorMixin):
    r"""
    Constructs a Qwen2-VL processor which wraps a Qwen2-VL image processor and a Qwen2 tokenizer into a single
    processor. [`Qwen2VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and
    [`Qwen2TokenizerFast`]. See the [`~Qwen2VLProcessor.__call__`] and [`~Qwen2VLProcessor.decode`] for more
    information.

    Args:
        image_processor ([`Qwen2VLImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*):
            A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
    """

    attributes = ["image_processor", "tokenizer"]
    valid_kwargs = ["chat_template"]
    image_processor_class = "Qwen2VLImageProcessor"
    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        videos: VideoInput = None,
        **kwargs: Unpack[Qwen2VLProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments
        to Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor, or
                a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
              not `None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`. - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`. - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`. """ output_kwargs = self._merge_kwargs( Qwen2VLProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if images is not None: image_inputs = self.image_processor(images=images, videos=None, **output_kwargs["images_kwargs"]) image_grid_thw = image_inputs["image_grid_thw"] else: image_inputs = {} image_grid_thw = None if videos is not None: videos_inputs = self.image_processor(images=None, videos=videos, **output_kwargs["videos_kwargs"]) video_grid_thw = videos_inputs["video_grid_thw"] else: videos_inputs = {} video_grid_thw = None if not isinstance(text, list): text = [text] if image_grid_thw is not None: merge_length = self.image_processor.merge_size**2 index = 0 for i in range(len(text)): while self.image_token in text[i]: text[i] = text[i].replace( self.image_token, "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), 1 ) index += 1 text[i] = text[i].replace("<|placeholder|>", self.image_token) if video_grid_thw is not None: merge_length = self.image_processor.merge_size**2 index = 0 for i in range(len(text)): while self.video_token in text[i]: text[i] = text[i].replace( self.video_token, "<|placeholder|>" * (video_grid_thw[index].prod() // merge_length), 1 ) index += 1 text[i] = text[i].replace("<|placeholder|>", self.video_token) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def post_process_image_text_to_text(self, generated_outputs): """ Post-process the output of the model to decode the text. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` or `(sequence_length,)`. Returns: `List[str]`: The decoded text. """ return self.tokenizer.batch_decode( generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False ) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_definition
1,488
9,482
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
null
8,529
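A usage sketch for the placeholder expansion performed in `Qwen2VLProcessor.__call__` above; the checkpoint name and the dummy image are assumptions made for illustration.

```python
import numpy as np
from transformers import Qwen2VLProcessor

# "Qwen/Qwen2-VL-7B-Instruct" is assumed here as an example checkpoint id.
processor = Qwen2VLProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
text = "Describe this image: <|image_pad|>"

inputs = processor(images=image, text=text, return_tensors="pt")

# Each <|image_pad|> token in the text was expanded to
# image_grid_thw.prod() // merge_size**2 placeholder positions.
print(inputs["input_ids"].shape, inputs["pixel_values"].shape, inputs["image_grid_thw"])
```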
class Qwen2VLImageProcessor(BaseImageProcessor): r""" Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. min_pixels (`int`, *optional*, defaults to `56 * 56`): The min pixels of the image to resize the image. max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`): The max pixels of the image to resize the image. patch_size (`int`, *optional*, defaults to 14): The spacial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to 2): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder. """ model_input_names = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"] def __init__( self, do_resize: bool = True, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, min_pixels: int = 56 * 56, max_pixels: int = 28 * 28 * 1280, patch_size: int = 14, temporal_patch_size: int = 2, merge_size: int = 2, **kwargs, ) -> None: super().__init__(**kwargs) self.do_resize = do_resize self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.min_pixels = min_pixels self.max_pixels = max_pixels self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.merge_size = merge_size self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels} self.do_convert_rgb = do_convert_rgb def _preprocess( self, images: Union[ImageInput, VideoInput], do_resize: bool = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. 
        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values
                range from 0 to 1, set `do_rescale=False`.
            vision_info (`List[Dict]`, *optional*):
                Optional list of dictionaries containing additional information about vision inputs.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number
                of channels in the image.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding
                to the number of channels in the image.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0]) height, width = get_image_size(images[0], channel_dim=input_data_format) resized_height, resized_width = height, width processed_images = [] for image in images: if do_resize: resized_height, resized_width = smart_resize( height, width, factor=self.patch_size * self.merge_size, min_pixels=self.min_pixels, max_pixels=self.max_pixels, ) image = resize( image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) processed_images.append(image) patches = np.array(processed_images) if data_format == ChannelDimension.LAST: patches = patches.transpose(0, 3, 1, 2) if patches.shape[0] % self.temporal_patch_size != 0: repeats = np.repeat(patches[-1][np.newaxis], self.temporal_patch_size - 1, axis=0) patches = np.concatenate([patches, repeats], axis=0) channel = patches.shape[1] grid_t = patches.shape[0] // self.temporal_patch_size grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size patches = patches.reshape( grid_t, self.temporal_patch_size, channel, grid_h // self.merge_size, self.merge_size, self.patch_size, grid_w // self.merge_size, self.merge_size, self.patch_size, ) patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8) flatten_patches = patches.reshape( grid_t * grid_h * grid_w, channel * self.temporal_patch_size * self.patch_size * self.patch_size ) return flatten_patches, (grid_t, grid_h, grid_w) def preprocess( self, images: ImageInput, videos: VideoInput = None, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. videos (`VideoInput`): Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If passing in videos with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb if images is not None: images = make_batched_images(images) if videos is not None: videos = make_batched_videos(videos) if images is not None and not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) validate_preprocess_arguments( rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) if images is not None: pixel_values, vision_grid_thws = [], [] for image in images: patches, image_grid_thw = self._preprocess( image, do_resize=do_resize, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, ) pixel_values.extend(patches) vision_grid_thws.append(image_grid_thw) pixel_values = np.array(pixel_values) vision_grid_thws = np.array(vision_grid_thws) data = {"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws} if videos is not None: pixel_values, vision_grid_thws = [], [] for images in videos: patches, video_grid_thw = self._preprocess( images, do_resize=do_resize, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, ) pixel_values.extend(patches) vision_grid_thws.append(video_grid_thw) pixel_values = np.array(pixel_values) vision_grid_thws = np.array(vision_grid_thws) data = {"pixel_values_videos": pixel_values, "video_grid_thw": vision_grid_thws} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
4,508
22,359
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py
null
8,530
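A small sketch of the dynamic patching behaviour implemented in `_preprocess` above, using default settings; the input size is arbitrary.

```python
import numpy as np
from transformers import Qwen2VLImageProcessor

image_processor = Qwen2VLImageProcessor()  # default min_pixels / max_pixels

# Any input size works; smart_resize picks a size that is a multiple of patch_size * merge_size.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
outputs = image_processor(images=image, return_tensors="np")

# Each row of pixel_values is one flattened patch of size
# channels * temporal_patch_size * patch_size * patch_size = 3 * 2 * 14 * 14 = 1176,
# and image_grid_thw stores the (t, h, w) patch grid used for placeholder expansion.
print(outputs["pixel_values"].shape)  # (grid_t * grid_h * grid_w, 1176)
print(outputs["image_grid_thw"])      # e.g. [[1, grid_h, grid_w]]
```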
class GPTSw3Tokenizer(PreTrainedTokenizer): """ Construct an GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Example usage: ```python >>> from transformers import GPTSw3Tokenizer >>> tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m") >>> tokenizer("Svenska är kul!")["input_ids"] [1814, 377, 3617, 63504] ``` Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. remove_space (`bool`, *optional*, defaults to `False`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (`bool`, *optional*, defaults to `False`): Whether or not to keep accents when tokenizing. pad_token (`str`, *optional*): The token used for padding, for example when batching sequences of different lengths. If not provided, will default to '<pad>' or '<unk>' depending on model size. unk_token (`str`, *optional*): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. If not provided, will default to '<unk>'. eos_token (`str`, *optional*): The end of sequence token seen during pretraining. If not provided, will default to '<|endoftext|>' bos_token (`str`, *optional*): The beginning of sequence token that can be used for downstream task, was not seen during pretraining. If not provided, will default to '<s>' or '<|endoftext|>', depending on model size. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). whitespaces (`set`): The whitespaces that are replaced in the whitespace normalization in preprocessing. non_printing_characters_re (`Pattern`): The compiled regular expression to remove non-printing characters in preprocessing. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs name_or_path = kwargs.get("name_or_path") if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) name_or_path = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing eos_token = "<|endoftext|>" if eos_token is None else eos_token unk_token = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: pad_token = unk_token if pad_token is None else pad_token bos_token = eos_token if bos_token is None else bos_token else: pad_token = "<pad>" if pad_token is None else pad_token bos_token = "<s>" if bos_token is None else bos_token self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) # Used for whitespace normalization in input texts # fmt : off self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing self.non_printing_characters_re = re.compile( f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]" ) super().__init__( do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__getstate__ def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__setstate__ def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def vocab_size(self) -> int: return len(self.sp_model) def preprocess_text(self, text: str) -> str: """ Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer. 
""" # Remove non-printing characters text = self.non_printing_characters_re.sub("", text) # Normalize whitespaces text = "".join([char if char not in self.whitespaces else " " for char in text]) # NFC Unicode normalization text = unicodedata.normalize("NFC", text) return text def _tokenize(self, text: str, **kwargs) -> List[str]: text = self.preprocess_text(text) return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) to an id (int) using the vocab.""" return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index: int) -> str: """Converts an index (int) to a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) @staticmethod def clean_up_tokenization(out_string: str) -> str: """Returns the input string, this function is overridden to remove the default clean up.""" return out_string def convert_tokens_to_string(self, tokens: List[str]) -> str: """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.get_vocab def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def encode_fast( self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced functionality but is often much faster. Does NOT handle special tokens correctly, these can manually be added as ids afterwards. Does NOT support padding, these can manually be added as ids afterwards. Use default HuggingFace tokenization methods for full functionality. Args: text (`str` or `List[str]`): One or several text(s) to convert to token ids. return_tensors (`str` or `bool`): Returns PyTorch tensors if set to True or "pt" Returns: `List[int]`, `List[List[int]]`, or `torch.Tensor`: The encoded text(s) as token ids. 
""" if isinstance(text, str): text = self.preprocess_text(text) token_ids = self.sp_model.encode(text) else: text = [self.preprocess_text(t) for t in text] token_ids = self.sp_model.encode(text) if return_tensors is True or return_tensors == "pt": token_ids = torch.tensor(token_ids) return token_ids def decode_fast(self, token_ids: Union[int, List[int]]) -> str: """ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced functionality but is often much faster. Args: token_ids (`int` or `List[int]`): Encoded token or text as token id(s). Returns: `str`: Decoded text """ return self.sp_model.decode(token_ids)
class_definition
445
12,469
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py
null
8,531
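A brief sketch of the `encode_fast`/`decode_fast` shortcut above, using the checkpoint already referenced in the class docstring; the exact ids depend on the vocabulary.

```python
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")

# encode_fast applies the same whitespace/Unicode preprocessing as the regular
# tokenization path but skips special-token handling and padding entirely.
ids = tokenizer.encode_fast("Svenska är kul!")
print(ids)

# decode_fast goes straight through the SentencePiece model.
print(tokenizer.decode_fast(ids))
```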
class TFSwiftFormerPatchEmbeddingSequential(keras.layers.Layer): """ The sequential component of the patch embedding layer. Input: tensor of shape `[batch_size, in_channels, height, width]` Output: tensor of shape `[batch_size, out_channels, height/4, width/4]` """ def __init__(self, config: SwiftFormerConfig, **kwargs): super().__init__(**kwargs) self.out_chs = config.embed_dims[0] self.zero_padding = keras.layers.ZeroPadding2D(padding=(1, 1)) self.conv1 = keras.layers.Conv2D(self.out_chs // 2, kernel_size=3, strides=2, name="0") self.batch_norm1 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="1") self.conv2 = keras.layers.Conv2D(self.out_chs, kernel_size=3, strides=2, name="3") self.batch_norm2 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="4") self.config = config def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.zero_padding(x) x = self.conv1(x) x = self.batch_norm1(x, training=training) x = get_tf_activation("relu")(x) x = self.zero_padding(x) x = self.conv2(x) x = self.batch_norm2(x, training=training) x = get_tf_activation("relu")(x) return x def build(self, input_shape=None): if self.built: return if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(self.config.num_channels) if getattr(self, "batch_norm1", None) is not None: with tf.name_scope(self.batch_norm1.name): self.batch_norm1.build((None, None, None, self.out_chs // 2)) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build((None, None, None, self.out_chs // 2)) if getattr(self, "batch_norm2", None) is not None: with tf.name_scope(self.batch_norm2.name): self.batch_norm2.build((None, None, None, self.out_chs)) self.built = True
class_definition
1,530
3,687
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,532
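A quick shape check for the two stride-2 convolutions above; the 224x224 input and the default `SwiftFormerConfig` are assumptions for illustration, and note that the TF port feeds this layer NHWC tensors (the main layer transposes before calling it).

```python
import tensorflow as tf
from transformers import SwiftFormerConfig
from transformers.models.swiftformer.modeling_tf_swiftformer import (
    TFSwiftFormerPatchEmbeddingSequential,
)

config = SwiftFormerConfig()
layer = TFSwiftFormerPatchEmbeddingSequential(config)

x = tf.random.uniform((1, 224, 224, 3))  # (batch, height, width, channels)
y = layer(x)

# Two 3x3 / stride-2 convolutions reduce the spatial size by a factor of 4.
print(y.shape)  # (1, 56, 56, config.embed_dims[0])
```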
class TFSwiftFormerPatchEmbedding(keras.layers.Layer): """ Patch Embedding Layer constructed of two 2D convolutional layers. Input: tensor of shape `[batch_size, in_channels, height, width]` Output: tensor of shape `[batch_size, out_channels, height/4, width/4]` """ def __init__(self, config: SwiftFormerConfig, **kwargs): super().__init__(**kwargs) self.patch_embedding = TFSwiftFormerPatchEmbeddingSequential(config, name="patch_embedding") def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: return self.patch_embedding(x, training=training) def build(self, input_shape=None): if self.built: return if getattr(self, "patch_embedding", None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build(None) self.built = True
class_definition
3,690
4,585
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,533
class TFSwiftFormerDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(**kwargs) raise NotImplementedError("Drop path is not implemented in TF port") def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: raise NotImplementedError("Drop path is not implemented in TF port")
class_definition
4,588
5,078
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,534
class TFSwiftFormerEmbeddings(keras.layers.Layer): """ Embeddings layer consisting of a single 2D convolutional and batch normalization layer. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height/stride, width/stride]` """ def __init__(self, config: SwiftFormerConfig, index: int, **kwargs): super().__init__(**kwargs) patch_size = config.down_patch_size stride = config.down_stride padding = config.down_pad embed_dims = config.embed_dims self.in_chans = embed_dims[index] self.embed_dim = embed_dims[index + 1] patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.pad = keras.layers.ZeroPadding2D(padding=padding) self.proj = keras.layers.Conv2D(self.embed_dim, kernel_size=patch_size, strides=stride, name="proj") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.pad(x) x = self.proj(x) x = self.norm(x, training=training) return x def build(self, input_shape=None): if self.built: return if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build(self.in_chans) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.embed_dim)) self.built = True
class_definition
5,081
6,939
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,535
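A shape sketch for the downsampling layer above, again assuming NHWC input as used inside the TF main layer; the config and tensor sizes are illustrative.

```python
import tensorflow as tf
from transformers import SwiftFormerConfig
from transformers.models.swiftformer.modeling_tf_swiftformer import TFSwiftFormerEmbeddings

config = SwiftFormerConfig()
layer = TFSwiftFormerEmbeddings(config, index=0)  # embed_dims[0] -> embed_dims[1]

x = tf.random.uniform((1, 56, 56, config.embed_dims[0]))
y = layer(x)

# A 3x3 convolution with stride 2 and padding 1 halves the spatial resolution.
print(y.shape)  # (1, 28, 28, config.embed_dims[1])
```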
class TFSwiftFormerConvEncoder(keras.layers.Layer): """ `SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, **kwargs): super().__init__(**kwargs) hidden_dim = int(config.mlp_ratio * dim) self.dim = dim self.pad = keras.layers.ZeroPadding2D(padding=(1, 1)) self.depth_wise_conv = keras.layers.Conv2D(dim, kernel_size=3, groups=dim, name="depth_wise_conv") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.point_wise_conv1 = keras.layers.Conv2D(hidden_dim, kernel_size=1, name="point_wise_conv1") self.act = get_tf_activation("gelu") self.point_wise_conv2 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv2") self.drop_path = keras.layers.Dropout(name="drop_path", rate=config.drop_conv_encoder_rate) self.hidden_dim = int(config.mlp_ratio * self.dim) def build(self, input_shape=None): if self.built: return self.layer_scale = self.add_weight( name="layer_scale", shape=self.dim, initializer="ones", trainable=True, ) if getattr(self, "depth_wise_conv", None) is not None: with tf.name_scope(self.depth_wise_conv.name): self.depth_wise_conv.build(self.dim) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.dim)) if getattr(self, "point_wise_conv1", None) is not None: with tf.name_scope(self.point_wise_conv1.name): self.point_wise_conv1.build(self.dim) if getattr(self, "point_wise_conv2", None) is not None: with tf.name_scope(self.point_wise_conv2.name): self.point_wise_conv2.build(self.hidden_dim) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: input = x x = self.pad(x) x = self.depth_wise_conv(x) x = self.norm(x, training=training) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x) return x
class_definition
6,942
9,572
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,536
class TFSwiftFormerMlp(keras.layers.Layer): """ MLP layer with 1*1 convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, in_features: int, **kwargs): super().__init__(**kwargs) hidden_features = int(in_features * config.mlp_ratio) self.norm1 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm1") self.fc1 = keras.layers.Conv2D(hidden_features, 1, name="fc1") act_layer = get_tf_activation(config.hidden_act) self.act = act_layer self.fc2 = keras.layers.Conv2D(in_features, 1, name="fc2") self.drop = keras.layers.Dropout(rate=config.drop_mlp_rate) self.hidden_features = hidden_features self.in_features = in_features def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.norm1(x, training=training) x = self.fc1(x) x = self.act(x) x = self.drop(x, training=training) x = self.fc2(x) x = self.drop(x, training=training) return x def build(self, input_shape=None): if self.built: return if getattr(self, "norm1", None) is not None: with tf.name_scope(self.norm1.name): self.norm1.build((None, None, None, self.in_features)) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build((None, None, None, self.in_features)) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build((None, None, None, self.hidden_features)) self.built = True
class_definition
9,575
11,389
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,537
class TFSwiftFormerEfficientAdditiveAttention(keras.layers.Layer): """ Efficient Additive Attention module for SwiftFormer. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int = 512, **kwargs): super().__init__(**kwargs) self.dim = dim self.to_query = keras.layers.Dense(dim, name="to_query") self.to_key = keras.layers.Dense(dim, name="to_key") self.scale_factor = dim**-0.5 self.proj = keras.layers.Dense(dim, name="proj") self.final = keras.layers.Dense(dim, name="final") def build(self, input_shape=None): if self.built: return self.w_g = self.add_weight( name="w_g", shape=(self.dim, 1), initializer=keras.initializers.RandomNormal(mean=0, stddev=1), trainable=True, ) if getattr(self, "to_query", None) is not None: with tf.name_scope(self.to_query.name): self.to_query.build(self.dim) if getattr(self, "to_key", None) is not None: with tf.name_scope(self.to_key.name): self.to_key.build(self.dim) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build(self.dim) if getattr(self, "final", None) is not None: with tf.name_scope(self.final.name): self.final.build(self.dim) self.built = True def call(self, x: tf.Tensor) -> tf.Tensor: query = self.to_query(x) key = self.to_key(x) query = tf.math.l2_normalize(query, dim=-1) key = tf.math.l2_normalize(key, dim=-1) query_weight = query @ self.w_g scaled_query_weight = query_weight * self.scale_factor scaled_query_weight = tf.nn.softmax(scaled_query_weight, axis=-1) global_queries = tf.math.reduce_sum(scaled_query_weight * query, axis=1) global_queries = tf.tile(tf.expand_dims(global_queries, 1), (1, key.shape[1], 1)) out = self.proj(global_queries * key) + query out = self.final(out) return out
class_definition
11,392
13,657
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,538
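A standalone restatement of the additive-attention math in `call` above, with random tensors standing in for the learned projections; the names and shapes here are hypothetical and are kept only to show that the cost stays linear in the number of tokens.

```python
import tensorflow as tf

batch, tokens, dim = 2, 49, 64
x = tf.random.uniform((batch, tokens, dim))       # stand-in for to_query(x) / to_key(x)
w_g = tf.random.normal((dim, 1))                  # learnable per-channel weights

query = tf.math.l2_normalize(x, axis=-1)
key = tf.math.l2_normalize(x, axis=-1)

# One scalar score per token instead of a tokens x tokens attention matrix.
query_weight = query @ w_g                        # (batch, tokens, 1)
scaled = tf.nn.softmax(query_weight * dim**-0.5, axis=-1)

# Pool into a single global query, then broadcast it back over the key sequence.
global_queries = tf.reduce_sum(scaled * query, axis=1)                   # (batch, dim)
global_queries = tf.tile(tf.expand_dims(global_queries, 1), (1, tokens, 1))

out = global_queries * key + query                # element-wise, O(tokens * dim)
print(out.shape)                                  # (2, 49, 64)
```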
class TFSwiftFormerLocalRepresentation(keras.layers.Layer): """ Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, **kwargs): super().__init__(**kwargs) self.dim = dim self.pad = keras.layers.ZeroPadding2D(padding=(1, 1)) self.depth_wise_conv = keras.layers.Conv2D(dim, kernel_size=3, groups=dim, name="depth_wise_conv") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.point_wise_conv1 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv1") self.act = get_tf_activation("gelu") self.point_wise_conv2 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv2") self.drop_path = keras.layers.Identity(name="drop_path") def build(self, input_shape=None): if self.built: return self.layer_scale = self.add_weight( name="layer_scale", shape=(self.dim), initializer="ones", trainable=True, ) if getattr(self, "depth_wise_conv", None) is not None: with tf.name_scope(self.depth_wise_conv.name): self.depth_wise_conv.build((None, None, None, self.dim)) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.dim)) if getattr(self, "point_wise_conv1", None) is not None: with tf.name_scope(self.point_wise_conv1.name): self.point_wise_conv1.build(self.dim) if getattr(self, "point_wise_conv2", None) is not None: with tf.name_scope(self.point_wise_conv2.name): self.point_wise_conv2.build(self.dim) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: input = x x = self.pad(x) x = self.depth_wise_conv(x) x = self.norm(x, training=training) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x, training=training) return x
class_definition
13,660
16,237
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,539
class TFSwiftFormerEncoderBlock(keras.layers.Layer): """ SwiftFormer Encoder Block for SwiftFormer. It consists of (1) Local representation module, (2) SwiftFormerEfficientAdditiveAttention, and (3) MLP block. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels,height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, drop_path: float = 0.0, **kwargs): super().__init__(**kwargs) layer_scale_init_value = config.layer_scale_init_value use_layer_scale = config.use_layer_scale self.local_representation = TFSwiftFormerLocalRepresentation(config, dim=dim, name="local_representation") self.attn = TFSwiftFormerEfficientAdditiveAttention(config, dim=dim, name="attn") self.linear = TFSwiftFormerMlp(config, in_features=dim, name="linear") self.drop_path = TFSwiftFormerDropPath(config) if drop_path > 0.0 else keras.layers.Identity() self.use_layer_scale = use_layer_scale if use_layer_scale: self.dim = dim self.layer_scale_init_value = layer_scale_init_value def build(self, input_shape=None): if self.built: return self.layer_scale_1 = self.add_weight( name="layer_scale_1", shape=self.dim, initializer=keras.initializers.constant(self.layer_scale_init_value), trainable=True, ) self.layer_scale_2 = self.add_weight( name="layer_scale_2", shape=self.dim, initializer=keras.initializers.constant(self.layer_scale_init_value), trainable=True, ) if getattr(self, "local_representation", None) is not None: with tf.name_scope(self.local_representation.name): self.local_representation.build(None) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "linear", None) is not None: with tf.name_scope(self.linear.name): self.linear.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False): x = self.local_representation(x, training=training) batch_size, height, width, channels = x.shape res = tf.reshape(x, [-1, height * width, channels]) res = self.attn(res) res = tf.reshape(res, [-1, height, width, channels]) if self.use_layer_scale: x = x + self.drop_path(self.layer_scale_1 * res, training=training) x = x + self.drop_path(self.layer_scale_2 * self.linear(x), training=training) else: x = x + self.drop_path(res, training=training) x = x + self.drop_path(self.linear(x), training=training) return x
class_definition
16,240
19,125
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,540
class TFSwiftFormerStage(keras.layers.Layer): """ A Swiftformer stage consisting of a series of `SwiftFormerConvEncoder` blocks and a final `SwiftFormerEncoderBlock`. Input: tensor in shape `[batch_size, channels, height, width]` Output: tensor in shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, index: int, **kwargs) -> None: super().__init__(**kwargs) layer_depths = config.depths dim = config.embed_dims[index] depth = layer_depths[index] self.blocks = [] for block_idx in range(depth): block_dpr = config.drop_path_rate * (block_idx + sum(layer_depths[:index])) / (sum(layer_depths) - 1) if depth - block_idx <= 1: self.blocks.append( TFSwiftFormerEncoderBlock(config, dim=dim, drop_path=block_dpr, name=f"blocks_._{block_idx}") ) else: self.blocks.append(TFSwiftFormerConvEncoder(config, dim=dim, name=f"blocks_._{block_idx}")) def call(self, input: tf.Tensor, training: bool = False) -> tf.Tensor: for i, block in enumerate(self.blocks): input = block(input, training=training) return input def build(self, input_shape=None): for layer in self.blocks: with tf.name_scope(layer.name): layer.build(None)
class_definition
19,128
20,543
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,541
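The per-block `block_dpr` formula in `TFSwiftFormerStage.__init__` spreads the drop-path probability linearly over all blocks of all stages; a small sketch with the default depths and a hypothetical non-zero `drop_path_rate`.

```python
# Mirrors block_dpr = drop_path_rate * (block_idx + sum(depths[:index])) / (sum(depths) - 1)
depths = [3, 3, 6, 4]        # SwiftFormerConfig default
drop_path_rate = 0.1         # illustrative; the config default is 0.0

rates = [
    drop_path_rate * (block_idx + sum(depths[:index])) / (sum(depths) - 1)
    for index in range(len(depths))
    for block_idx in range(depths[index])
]
print([round(r, 3) for r in rates])
# 0.0 for the very first block, rising linearly to drop_path_rate for the last one
```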
class TFSwiftFormerEncoder(keras.layers.Layer): def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(**kwargs) self.config = config embed_dims = config.embed_dims downsamples = config.downsamples layer_depths = config.depths # Transformer model self.network = [] name_i = 0 for i in range(len(layer_depths)): stage = TFSwiftFormerStage(config, index=i, name=f"network_._{name_i}") self.network.append(stage) name_i += 1 if i >= len(layer_depths) - 1: break if downsamples[i] or embed_dims[i] != embed_dims[i + 1]: # downsampling between two stages self.network.append(TFSwiftFormerEmbeddings(config, index=i, name=f"network_._{name_i}")) name_i += 1 self.gradient_checkpointing = False def call( self, hidden_states: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFBaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = (hidden_states,) if output_hidden_states else None for i, block in enumerate(self.network): hidden_states = block(hidden_states, training=training) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) if all_hidden_states: all_hidden_states = tuple(tf.transpose(s, perm=[0, 3, 1, 2]) for s in all_hidden_states) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) def build(self, input_shape=None): for layer in self.network: with tf.name_scope(layer.name): layer.build(None)
class_definition
20,546
22,896
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,542
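A sketch of how the loop in `TFSwiftFormerEncoder.__init__` interleaves stages and downsampling embeddings; the printed layout is derived purely from the default config values.

```python
from transformers import SwiftFormerConfig

config = SwiftFormerConfig()

network = []
for i in range(len(config.depths)):
    network.append(f"stage {i}: {config.depths[i]} blocks @ dim {config.embed_dims[i]}")
    if i >= len(config.depths) - 1:
        break
    if config.downsamples[i] or config.embed_dims[i] != config.embed_dims[i + 1]:
        network.append(f"downsample: {config.embed_dims[i]} -> {config.embed_dims[i + 1]}")

print("\n".join(network))
```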
class TFSwiftFormerPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SwiftFormerConfig base_model_prefix = "swiftformer" main_input_name = "pixel_values"
class_definition
22,899
23,211
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,543
class TFSwiftFormerMainLayer(keras.layers.Layer):
    config_class = SwiftFormerConfig

    def __init__(self, config: SwiftFormerConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.patch_embed = TFSwiftFormerPatchEmbedding(config, name="patch_embed")
        self.encoder = TFSwiftFormerEncoder(config, name="encoder")

    @unpack_inputs
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
        r""" """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # TF 2.0 image layers can't use NCHW format when running on CPU.
        # We transpose to NHWC format and then transpose back after the full forward pass.
        # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
        pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1])

        embedding_output = self.patch_embed(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return tuple(v for v in encoder_outputs if v is not None)

        return TFBaseModelOutputWithNoAttention(
            last_hidden_state=encoder_outputs.last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        if getattr(self, "patch_embed", None) is not None:
            with tf.name_scope(self.patch_embed.name):
                self.patch_embed.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        self.built = True
class_definition
25,871
28,187
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,544
class TFSwiftFormerModel(TFSwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.swiftformer = TFSwiftFormerMainLayer(config, name="swiftformer") @unpack_inputs @add_start_docstrings_to_model_forward(TFSWIFTFORMER_INPUTS_DOCSTRING) def call( self, pixel_values: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithNoAttention, Tuple[tf.Tensor]]: outputs = self.swiftformer( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return if getattr(self, "swiftformer", None) is not None: with tf.name_scope(self.swiftformer.name): self.swiftformer.build(None) self.built = True
class_definition
28,361
29,485
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,545
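A minimal forward-pass sketch for the randomly initialized model above; no pretrained weights are loaded and the 224x224 input size is an assumption.

```python
import tensorflow as tf
from transformers import SwiftFormerConfig, TFSwiftFormerModel

config = SwiftFormerConfig()
model = TFSwiftFormerModel(config)

# Inputs follow the channels-first convention of the PyTorch model; the main layer
# transposes to NHWC internally and back before returning.
pixel_values = tf.random.uniform((1, 3, 224, 224))
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, config.embed_dims[-1], 7, 7)
```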
class TFSwiftFormerForImageClassification(TFSwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.num_labels = config.num_labels self.swiftformer = TFSwiftFormerMainLayer(config, name="swiftformer") # Classifier head self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.head = ( keras.layers.Dense(self.num_labels, name="head") if self.num_labels > 0 else keras.layers.Identity(name="head") ) self.dist_head = ( keras.layers.Dense(self.num_labels, name="dist_head") if self.num_labels > 0 else keras.layers.Identity(name="dist_head") ) def hf_compute_loss(self, labels, logits): if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == tf.int64 or labels.dtype == tf.int32): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = keras.losses.MSE if self.num_labels == 1: loss = loss_fct(labels.squeeze(), logits.squeeze()) else: loss = loss_fct(labels, logits) elif self.config.problem_type == "single_label_classification": loss_fct = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE ) loss = loss_fct(labels, logits) elif self.config.problem_type == "multi_label_classification": loss_fct = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE, ) loss = loss_fct(labels, logits) else: loss = None return loss @unpack_inputs @add_start_docstrings_to_model_forward(TFSWIFTFORMER_INPUTS_DOCSTRING) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFImageClassifierOutputWithNoAttention]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # run base model outputs = self.swiftformer( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs.last_hidden_state if return_dict else outputs[0] sequence_output = tf.transpose(sequence_output, perm=[0, 2, 3, 1]) # run classification head sequence_output = self.norm(sequence_output, training=training) sequence_output = tf.transpose(sequence_output, perm=[0, 3, 1, 2]) _, num_channels, height, width = sequence_output.shape sequence_output = tf.reshape(sequence_output, [-1, num_channels, height * width]) sequence_output = tf.reduce_mean(sequence_output, axis=-1) cls_out = self.head(sequence_output) distillation_out = self.dist_head(sequence_output) logits = (cls_out + distillation_out) / 2 # calculate loss loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return if getattr(self, "swiftformer", None) is not None: with tf.name_scope(self.swiftformer.name): self.swiftformer.build(None) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.config.embed_dims[-1])) if getattr(self, "head", None) is not None: with tf.name_scope(self.head.name): self.head.build(self.config.embed_dims[-1]) if getattr(self, "dist_head", None) is not None: with tf.name_scope(self.dist_head.name): self.dist_head.build(self.config.embed_dims[-1]) self.built = True
class_definition
29,663
34,859
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py
null
8,546
class SwiftFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SwiftFormerModel`]. It is used to instantiate a SwiftFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SwiftFormer [MBZUAI/swiftformer-xs](https://huggingface.co/MBZUAI/swiftformer-xs) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. num_channels (`int`, *optional*, defaults to 3): The number of input channels. depths (`List[int]`, *optional*, defaults to `[3, 3, 6, 4]`): Depth of each stage. embed_dims (`List[int]`, *optional*, defaults to `[48, 56, 112, 220]`): The embedding dimension at each stage. mlp_ratio (`int`, *optional*, defaults to 4): Ratio of the hidden dimensionality of an MLP to the dimensionality of its input. downsamples (`List[bool]`, *optional*, defaults to `[True, True, True, True]`): Whether or not to downsample inputs between two stages. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (string). `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. down_patch_size (`int`, *optional*, defaults to 3): The size of patches in downsampling layers. down_stride (`int`, *optional*, defaults to 2): The stride of convolution kernels in downsampling layers. down_pad (`int`, *optional*, defaults to 1): Padding in downsampling layers. drop_path_rate (`float`, *optional*, defaults to 0.0): Rate at which to increase dropout probability in DropPath. drop_mlp_rate (`float`, *optional*, defaults to 0.0): Dropout rate for the MLP component of SwiftFormer. drop_conv_encoder_rate (`float`, *optional*, defaults to 0.0): Dropout rate for the ConvEncoder component of SwiftFormer. use_layer_scale (`bool`, *optional*, defaults to `True`): Whether to scale outputs from token mixers. layer_scale_init_value (`float`, *optional*, defaults to 1e-05): Factor by which outputs from token mixers are scaled. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch normalization layers.
Example: ```python >>> from transformers import SwiftFormerConfig, SwiftFormerModel >>> # Initializing a SwiftFormer swiftformer-base-patch16-224 style configuration >>> configuration = SwiftFormerConfig() >>> # Initializing a model (with random weights) from the swiftformer-base-patch16-224 style configuration >>> model = SwiftFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "swiftformer" def __init__( self, image_size=224, num_channels=3, depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", down_patch_size=3, down_stride=2, down_pad=1, drop_path_rate=0.0, drop_mlp_rate=0.0, drop_conv_encoder_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, batch_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.depths = depths self.embed_dims = embed_dims self.mlp_ratio = mlp_ratio self.downsamples = downsamples self.hidden_act = hidden_act self.down_patch_size = down_patch_size self.down_stride = down_stride self.down_pad = down_pad self.drop_path_rate = drop_path_rate self.drop_mlp_rate = drop_mlp_rate self.drop_conv_encoder_rate = drop_conv_encoder_rate self.use_layer_scale = use_layer_scale self.layer_scale_init_value = layer_scale_init_value self.batch_norm_eps = batch_norm_eps
class_definition
925
5,391
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/configuration_swiftformer.py
null
8,547
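As a quick illustration of how the configuration record above is consumed, here is a hedged sketch that builds a down-sized variant; the `depths` and `embed_dims` values are purely illustrative and do not correspond to any official checkpoint.

```python
from transformers import SwiftFormerConfig, SwiftFormerModel

# Hypothetical smaller variant; these values are illustrative, not an official configuration.
config = SwiftFormerConfig(
    depths=[2, 2, 4, 2],           # number of blocks per stage
    embed_dims=[32, 48, 96, 160],  # channel width per stage
    drop_path_rate=0.05,           # peak of the stochastic depth schedule
)
model = SwiftFormerModel(config)  # randomly initialized weights

# One stage per entry in `depths`; the final feature map has embed_dims[-1] channels.
print(len(config.depths), config.embed_dims[-1])  # 4 160
```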
class SwiftFormerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4
class_definition
5,394
5,798
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/configuration_swiftformer.py
null
8,548
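The `SwiftFormerOnnxConfig` record above only declares the ONNX input axes and a validation tolerance. Below is a rough sketch of how those axes could feed a plain `torch.onnx.export` call; the output file name is a placeholder, and in practice the supported route is the transformers ONNX export tooling rather than this manual call.

```python
import torch
from transformers import SwiftFormerConfig, SwiftFormerModel

config = SwiftFormerConfig()
model = SwiftFormerModel(config).eval()
model.config.return_dict = False  # return plain tuples, which trace more cleanly

# Dummy input matching the single "pixel_values" entry declared by SwiftFormerOnnxConfig.
dummy = torch.randn(1, config.num_channels, config.image_size, config.image_size)

torch.onnx.export(
    model,
    (dummy,),
    "swiftformer.onnx",  # placeholder output path
    input_names=["pixel_values"],
    output_names=["last_hidden_state"],
    # Same dynamic axes as SwiftFormerOnnxConfig.inputs.
    dynamic_axes={"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}},
    opset_version=14,
)
```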
class SwiftFormerPatchEmbedding(nn.Module): """ Patch Embedding Layer constructed of two 2D convolutional layers. Input: tensor of shape `[batch_size, in_channels, height, width]` Output: tensor of shape `[batch_size, out_channels, height/4, width/4]` """ def __init__(self, config: SwiftFormerConfig): super().__init__() in_chs = config.num_channels out_chs = config.embed_dims[0] self.patch_embedding = nn.Sequential( nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_chs // 2, eps=config.batch_norm_eps), nn.ReLU(), nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_chs, eps=config.batch_norm_eps), nn.ReLU(), ) def forward(self, x): return self.patch_embedding(x)
class_definition
1,598
2,495
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,549
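A quick shape check for the patch embedding above: two stride-2 convolutions reduce the spatial resolution by 4 and map `num_channels` to `embed_dims[0]`. The sketch imports the class through the internal module path shown in the record, which is an implementation detail rather than public API.

```python
import torch
from transformers import SwiftFormerConfig
from transformers.models.swiftformer.modeling_swiftformer import SwiftFormerPatchEmbedding

config = SwiftFormerConfig()  # num_channels=3, embed_dims[0]=48
patch_embed = SwiftFormerPatchEmbedding(config)

pixel_values = torch.randn(2, 3, 224, 224)
features = patch_embed(pixel_values)
# 224 -> 112 -> 56 spatially, 3 -> 24 -> 48 channels.
print(features.shape)  # torch.Size([2, 48, 56, 56])
```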
class SwiftFormerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, config: SwiftFormerConfig) -> None: super().__init__() self.drop_prob = config.drop_path_rate def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
3,653
4,142
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,550
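`SwiftFormerDropPath` delegates to a `drop_path` helper that is not part of this record. Below is a minimal sketch of the standard stochastic depth formulation it refers to, assuming one Bernoulli draw per sample rescaled by the keep probability; the function name is hypothetical.

```python
import torch

def drop_path_sketch(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Per-sample stochastic depth: randomly drop whole residual branches."""
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dimensions.
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    mask = hidden_states.new_empty(shape).bernoulli_(keep_prob)
    # Rescale so the expectation matches the undropped branch.
    return hidden_states * mask / keep_prob
```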
class SwiftFormerEmbeddings(nn.Module): """ Embeddings layer consisting of a single 2D convolutional and batch normalization layer. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height/stride, width/stride]` """ def __init__(self, config: SwiftFormerConfig, index: int): super().__init__() patch_size = config.down_patch_size stride = config.down_stride padding = config.down_pad embed_dims = config.embed_dims in_chans = embed_dims[index] embed_dim = embed_dims[index + 1] patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=padding) self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) def forward(self, x): x = self.proj(x) x = self.norm(x) return x
class_definition
4,145
5,351
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,551
class SwiftFormerConvEncoder(nn.Module): """ `SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int): super().__init__() hidden_dim = int(config.mlp_ratio * dim) self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim) self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps) self.point_wise_conv1 = nn.Conv2d(dim, hidden_dim, kernel_size=1) self.act = nn.GELU() self.point_wise_conv2 = nn.Conv2d(hidden_dim, dim, kernel_size=1) self.drop_path = nn.Dropout(p=config.drop_conv_encoder_rate) self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True) def forward(self, x): input = x x = self.depth_wise_conv(x) x = self.norm(x) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x) return x
class_definition
5,354
6,531
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,552
class SwiftFormerMlp(nn.Module): """ MLP layer with 1*1 convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, in_features: int): super().__init__() hidden_features = int(in_features * config.mlp_ratio) self.norm1 = nn.BatchNorm2d(in_features, eps=config.batch_norm_eps) self.fc1 = nn.Conv2d(in_features, hidden_features, 1) act_layer = ACT2CLS[config.hidden_act] self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, in_features, 1) self.drop = nn.Dropout(p=config.drop_mlp_rate) def forward(self, x): x = self.norm1(x) x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x
class_definition
6,534
7,440
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,553
class SwiftFormerEfficientAdditiveAttention(nn.Module): """ Efficient Additive Attention module for SwiftFormer. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int = 512): super().__init__() self.to_query = nn.Linear(dim, dim) self.to_key = nn.Linear(dim, dim) self.w_g = nn.Parameter(torch.randn(dim, 1)) self.scale_factor = dim**-0.5 self.proj = nn.Linear(dim, dim) self.final = nn.Linear(dim, dim) def forward(self, x): query = self.to_query(x) key = self.to_key(x) query = torch.nn.functional.normalize(query, dim=-1) key = torch.nn.functional.normalize(key, dim=-1) query_weight = query @ self.w_g scaled_query_weight = query_weight * self.scale_factor scaled_query_weight = scaled_query_weight.softmax(dim=-1) global_queries = torch.sum(scaled_query_weight * query, dim=1) global_queries = global_queries.unsqueeze(1).repeat(1, key.shape[1], 1) out = self.proj(global_queries * key) + query out = self.final(out) return out
class_definition
7,443
8,698
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,554
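Note that this attention actually operates on flattened tokens of shape `(batch, seq_len, dim)`; the 4D shape in its docstring refers to the encoder block that reshapes before calling it. The following shape walk-through mirrors the record's tensor operations with illustrative sizes, to show that no `seq_len x seq_len` attention matrix is ever formed.

```python
import torch

batch_size, seq_len, dim = 2, 49, 220  # illustrative sizes, e.g. a 7x7 feature map

x = torch.randn(batch_size, seq_len, dim)
to_query, to_key = torch.nn.Linear(dim, dim), torch.nn.Linear(dim, dim)
w_g = torch.randn(dim, 1)

query = torch.nn.functional.normalize(to_query(x), dim=-1)  # (B, N, d)
key = torch.nn.functional.normalize(to_key(x), dim=-1)      # (B, N, d)

query_weight = query @ w_g                             # (B, N, 1) per-token score
scaled = (query_weight * dim**-0.5).softmax(dim=-1)    # same normalization as the record
global_query = torch.sum(scaled * query, dim=1)        # (B, d) single pooled query
global_query = global_query.unsqueeze(1).repeat(1, seq_len, 1)  # broadcast to (B, N, d)

out = global_query * key  # element-wise interaction, linear in seq_len
print(out.shape)  # torch.Size([2, 49, 220])
```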
class SwiftFormerLocalRepresentation(nn.Module): """ Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int): super().__init__() self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim) self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps) self.point_wise_conv1 = nn.Conv2d(dim, dim, kernel_size=1) self.act = nn.GELU() self.point_wise_conv2 = nn.Conv2d(dim, dim, kernel_size=1) self.drop_path = nn.Identity() self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True) def forward(self, x): input = x x = self.depth_wise_conv(x) x = self.norm(x) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x) return x
class_definition
8,701
9,848
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,555
class SwiftFormerEncoderBlock(nn.Module): """ SwiftFormer Encoder Block for SwiftFormer. It consists of (1) Local representation module, (2) SwiftFormerEfficientAdditiveAttention, and (3) MLP block. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, drop_path: float = 0.0) -> None: super().__init__() layer_scale_init_value = config.layer_scale_init_value use_layer_scale = config.use_layer_scale self.local_representation = SwiftFormerLocalRepresentation(config, dim=dim) self.attn = SwiftFormerEfficientAdditiveAttention(config, dim=dim) self.linear = SwiftFormerMlp(config, in_features=dim) self.drop_path = SwiftFormerDropPath(config) if drop_path > 0.0 else nn.Identity() self.use_layer_scale = use_layer_scale if use_layer_scale: self.layer_scale_1 = nn.Parameter( layer_scale_init_value * torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True ) self.layer_scale_2 = nn.Parameter( layer_scale_init_value * torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True ) def forward(self, x): x = self.local_representation(x) batch_size, channels, height, width = x.shape res = self.attn(x.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels)) res = res.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2) if self.use_layer_scale: x = x + self.drop_path(self.layer_scale_1 * res) x = x + self.drop_path(self.layer_scale_2 * self.linear(x)) else: x = x + self.drop_path(res) x = x + self.drop_path(self.linear(x)) return x
class_definition
9,851
11,745
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,556
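The encoder block moves between the convolutional layout `(B, C, H, W)` and the token layout `(B, H*W, C)` around the attention call. A tiny round-trip sketch with illustrative sizes:

```python
import torch

batch_size, channels, height, width = 2, 220, 7, 7
x = torch.randn(batch_size, channels, height, width)

# (B, C, H, W) -> (B, H*W, C): token layout expected by the additive attention.
tokens = x.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels)

# ... attention would run on `tokens` here ...

# (B, H*W, C) -> (B, C, H, W): back to the conv layout for the residual add.
restored = tokens.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
print(torch.equal(x, restored))  # True
```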
class SwiftFormerStage(nn.Module): """ A Swiftformer stage consisting of a series of `SwiftFormerConvEncoder` blocks and a final `SwiftFormerEncoderBlock`. Input: tensor in shape `[batch_size, channels, height, width]` Output: tensor in shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, index: int) -> None: super().__init__() layer_depths = config.depths dim = config.embed_dims[index] depth = layer_depths[index] blocks = [] for block_idx in range(depth): block_dpr = config.drop_path_rate * (block_idx + sum(layer_depths[:index])) / (sum(layer_depths) - 1) if depth - block_idx <= 1: blocks.append(SwiftFormerEncoderBlock(config, dim=dim, drop_path=block_dpr)) else: blocks.append(SwiftFormerConvEncoder(config, dim=dim)) self.blocks = nn.ModuleList(blocks) def forward(self, input): for block in self.blocks: input = block(input) return input
class_definition
11,748
12,832
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,557
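The stage builds a linearly increasing stochastic depth schedule over the global block index. A worked computation for the default depths `[3, 3, 6, 4]` with an illustrative `drop_path_rate` of 0.1 (the config default is 0.0):

```python
depths = [3, 3, 6, 4]   # default SwiftFormerConfig depths
drop_path_rate = 0.1    # illustrative; the config default is 0.0
total_blocks = sum(depths)

for index in range(len(depths)):
    for block_idx in range(depths[index]):
        global_idx = block_idx + sum(depths[:index])
        block_dpr = drop_path_rate * global_idx / (total_blocks - 1)
        print(f"stage {index}, block {block_idx}: drop_path={block_dpr:.4f}")
# The first block of stage 0 gets 0.0; the last block of stage 3 gets the full 0.1.
```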
class SwiftFormerEncoder(nn.Module): def __init__(self, config: SwiftFormerConfig) -> None: super().__init__() self.config = config embed_dims = config.embed_dims downsamples = config.downsamples layer_depths = config.depths # Transformer model network = [] for i in range(len(layer_depths)): stage = SwiftFormerStage(config=config, index=i) network.append(stage) if i >= len(layer_depths) - 1: break if downsamples[i] or embed_dims[i] != embed_dims[i + 1]: # downsampling between two stages network.append(SwiftFormerEmbeddings(config, index=i)) self.network = nn.ModuleList(network) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.network: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, )
class_definition
12,835
14,644
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,558
class SwiftFormerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SwiftFormerConfig base_model_prefix = "swiftformer" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["SwiftFormerEncoderBlock"] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Conv2d, nn.Linear)): nn.init.trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, (nn.LayerNorm)): nn.init.constant_(module.bias, 0) nn.init.constant_(module.weight, 1.0)
class_definition
14,647
15,525
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,559
class SwiftFormerModel(SwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig): super().__init__(config) self.config = config self.patch_embed = SwiftFormerPatchEmbedding(config) self.encoder = SwiftFormerEncoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SWIFTFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithNoAttention]: r""" """ output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.patch_embed(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return tuple(v for v in encoder_outputs if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, )
class_definition
16,941
18,724
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,560
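A minimal inference sketch for the base model, using the MBZUAI/swiftformer-xs checkpoint referenced in the configuration record; the blank PIL image is a placeholder for a real photo, and `AutoImageProcessor` is assumed to resolve the checkpoint's preprocessing config.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, SwiftFormerModel

processor = AutoImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerModel.from_pretrained("MBZUAI/swiftformer-xs")

image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Final feature map: embed_dims[-1] channels at 1/32 of the 224x224 input.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 220, 7, 7])
```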
class SwiftFormerForImageClassification(SwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig) -> None: super().__init__(config) embed_dims = config.embed_dims self.num_labels = config.num_labels self.swiftformer = SwiftFormerModel(config) # Classifier head self.norm = nn.BatchNorm2d(embed_dims[-1], eps=config.batch_norm_eps) self.head = nn.Linear(embed_dims[-1], self.num_labels) if self.num_labels > 0 else nn.Identity() self.dist_head = nn.Linear(embed_dims[-1], self.num_labels) if self.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SWIFTFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict # run base model outputs = self.swiftformer( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs.last_hidden_state if return_dict else outputs[0] # run classification head sequence_output = self.norm(sequence_output) sequence_output = sequence_output.flatten(2).mean(-1) cls_out = self.head(sequence_output) distillation_out = self.dist_head(sequence_output) logits = (cls_out + distillation_out) / 2 # calculate loss loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
class_definition
18,898
22,745
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swiftformer/modeling_swiftformer.py
null
8,561
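And the matching classification sketch: the average of the classification and distillation heads is what ends up in `logits`. Same assumptions as above (placeholder image, MBZUAI/swiftformer-xs checkpoint).

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, SwiftFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

image = Image.new("RGB", (224, 224))  # placeholder; use a real photo in practice
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # average of head and dist_head

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```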
class NougatTokenizerFast(PreTrainedTokenizerFast): """ Fast tokenizer for Nougat (backed by HuggingFace tokenizers library). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific methods for postprocessing the generated text. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra spaces. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = None def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", **kwargs, ): super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, ) self.vocab_file = vocab_file def remove_hallucinated_references(self, text: str) -> str: """ Remove hallucinated or missing references from the text. This function identifies and removes references that are marked as missing or hallucinated from the input text. Args: text (`str`): The input text containing references. Returns: `str`: The text with hallucinated references removed. """ lines = text.split("\n") if len(lines) == 0: return "" clean_lines = remove_numbers(lines) slices = get_slices(lines, clean_lines) to_delete = [] for slice in slices: to_delete.append(remove_slice_from_lines(lines, clean_lines, slice)) for to_delete in reversed(to_delete): text = text.replace(to_delete, "\n\n[MISSING_PAGE_POST]\n\n") text = re.sub( r"## References\n+\[MISSING_PAGE_POST(:\d+)?\]", "\n\n[MISSING_PAGE_POST\\1]", text, ) return text def correct_tables(self, generation: str) -> str: """ Takes a generated string and fixes tables/tabulars to make them match the markdown format needed. Args: generation (str): The generated text to be postprocessed. Returns: str: The postprocessed text.
Example: ```python correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}") "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}" ``` """ # remove obvious wrong tables for l in generation.split("\n"): if l.count("\\begin{tabular}") > 15 or l.count("\\multicolumn") > 60 or l.count("&") > 400: generation = generation.replace(l, "") # whitespace corrections generation = generation.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}") generation = generation.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}") generation = generation.replace("\\end{table} Tab", "\\end{table}\nTab") generation = re.sub(r"(^.+)\\begin{tab", r"\1\n\\begin{tab", generation, flags=re.M) # Remove left-aligned empty LaTeX tabular blocks. generation = generation.replace(r"\begin{tabular}{l l} & \\ \end{tabular}", "") # Remove tabulars with just 2 newline characters. generation = generation.replace("\\begin{tabular}{}\n\n\\end{tabular}", "") return generation def post_process_single(self, generation: str, fix_markdown: bool = True) -> str: """ Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article authors. These expressions are commented for clarity and tested end-to-end in most cases. Args: generation (str): The generated text to be postprocessed. fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True. Returns: str: The postprocessed text. """ generation = re.sub( r"(?:\n|^)#+ \d*\W? ?(.{100,})", r"\n\1", generation ) # too long section titles probably are none generation = generation.strip() # Remove LaTeX left margin tag generation = generation.replace("\n* [leftmargin=*]\n", "\n") # Remove lines with markdown headings starting with #, with numerals, # and possibly roman numerals with trailing spaces and newlines generation = re.sub(r"^#+ (?:[\d+\.]+|[ixv\.]+)?\s*(?:$|\n\s*)", "", generation, flags=re.M) # most likely hallucinated titles lines = generation.split("\n") if lines[-1].startswith("#") and lines[-1].lstrip("#").startswith(" ") and len(lines) > 1: logger.info("Likely hallucinated title at the end of the page: " + lines[-1]) generation = "\n".join(lines[:-1]) # obvious repetition detection generation = truncate_repetitions(generation) # Reference corrections generation = self.remove_hallucinated_references(generation) # Remove lines starting with asterisks and numbers like "*[1]" and followed by capital letters and periods (ie too long references) generation = re.sub(r"^\* \[\d+\](\s?[A-W]\.+\s?){10,}.*$", "", generation, flags=re.M) # Remove empty brackets after a reference number in brackets. 
*[12][]ABC will become *[12]ABC generation = re.sub(r"^(\* \[\d+\])\[\](.*)$", r"\1\2", generation, flags=re.M) # Remove single characters before or after 2 new lines generation = re.sub(r"(^\w\n\n|\n\n\w$)", "", generation) # pmc math artifact correction generation = re.sub( r"([\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\s.,:()])", r"\1\(\2_{\3}\)\4", generation, ) generation = re.sub(r"([\s.,\d])_([a-zA-Z0-9])_([\s.,\d;])", r"\1\(\2\)\3", generation) # footnote mistakes generation = re.sub( r"(\nFootnote .*?:) (?:footnotetext|thanks):\W*(.*(?:\n\n|$))", r"\1 \2", generation, ) # TODO Come up with footnote formatting inside a table generation = re.sub(r"\[FOOTNOTE:.+?\](.*?)\[ENDFOOTNOTE\]", "", generation) # itemize post processing generation = normalize_list_like_lines(generation) if generation.endswith((".", "}")): generation += "\n\n" if re.match(r"[A-Z0-9,;:]$", generation): # add space in case there is a comma or word ending generation += " " elif generation.startswith(("#", "**", "\\begin")): generation = "\n\n" + generation elif generation.split("\n")[-1].startswith(("#", "Figure", "Table")): generation = generation + "\n\n" else: try: last_word = generation.split(" ")[-1] if last_word in nltk.corpus.words.words(): generation += " " except LookupError: # add space just in case. Will split words but better than concatenating them generation += " " # table corrections generation = self.correct_tables(generation) # Remove optional, empty square brackets after begin{array} generation = generation.replace("\\begin{array}[]{", "\\begin{array}{") # Remove empty or malformed LaTeX tabular blocks with 2 or more columns specified, with spaces and ampersands. generation = re.sub( r"\\begin{tabular}{([clr ]){2,}}\s*[& ]*\s*(\\\\)? \\end{tabular}", "", generation, ) # Remove lines containing "S.A.B." one or more times. Was included in Nougat's code. generation = re.sub(r"(\*\*S\. A\. B\.\*\*\n+){2,}", "", generation) # Remove markdown-style headers that are incomplete or empty on multiple lines. generation = re.sub(r"^#+( [\[\d\w])?$", "", generation, flags=re.M) # Remove lines with just one period. generation = re.sub(r"^\.\s*$", "", generation, flags=re.M) # Replace instances of three or more newlines with just two newlines. generation = re.sub(r"\n{3,}", "\n\n", generation) if fix_markdown: return markdown_compatible(generation) else: return generation def post_process_generation( self, generation: Union[str, List[str]], fix_markdown: bool = True, num_workers: int = None, ) -> Union[str, List[str]]: """ Postprocess a generated text or a list of generated texts. This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. Args: generation (Union[str, List[str]]): The generated text or a list of generated texts. fix_markdown (`bool`, *optional*, defaults to `True`): Whether to perform Markdown formatting fixes. num_workers (`int`, *optional*): Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in parallel). Returns: Union[str, List[str]]: The postprocessed text or list of postprocessed texts.
""" requires_backends(self, ["nltk", "levenshtein"]) if isinstance(generation, list): if num_workers is not None and isinstance(num_workers, int): with Pool(num_workers) as p: return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation) else: return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] else: return self.post_process_single(generation, fix_markdown=fix_markdown)
class_definition
13,076
24,703
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/nougat/tokenization_nougat_fast.py
null
8,562
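A hedged usage sketch for the postprocessing path above. The "facebook/nougat-base" checkpoint name is an assumption (it is not named in this record), and `post_process_generation` requires the optional `nltk` and `python-Levenshtein` backends.

```python
# pip install nltk python-Levenshtein  (optional backends used by the postprocessing)
from transformers import NougatTokenizerFast

tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")  # assumed checkpoint

raw = "# Introduction\n\nThis is generated markdown.\n\n\n\nToo many blank lines above."
cleaned = tokenizer.post_process_generation(raw, fix_markdown=True)
print(cleaned)

# A list of generations can be postprocessed in parallel:
# tokenizer.post_process_generation([raw, raw], num_workers=2)
```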
class NougatProcessor(ProcessorMixin): r""" Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. Args: image_processor ([`NougatImageProcessor`]): An instance of [`NougatImageProcessor`]. The image processor is a required input. tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__( self, images=None, text=None, do_crop_margin: bool = None, do_resize: bool = None, size: Dict[str, int] = None, resample: "PILImageResampling" = None, # noqa: F821 do_thumbnail: bool = None, do_align_long_axis: bool = None, do_pad: bool = None, do_rescale: bool = None, rescale_factor: Union[int, float] = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821 input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821 text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.image_processor( images, do_crop_margin=do_crop_margin, do_resize=do_resize, size=size, resample=resample, do_thumbnail=do_thumbnail, do_align_long_axis=do_align_long_axis, do_pad=do_pad, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, input_data_format=input_data_format, ) if text is not None: encodings = self.tokenizer( text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, 
return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, ) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def post_process_generation(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.post_process_generation(*args, **kwargs)
class_definition
887
6,730
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/nougat/processing_nougat.py
null
8,563
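A sketch of the processor's routing behaviour: images alone yield `pixel_values`, text alone yields token ids, and passing both attaches the tokenized text as `labels`. The "facebook/nougat-base" checkpoint name is an assumption and the blank page is a placeholder.

```python
from PIL import Image
from transformers import NougatProcessor

processor = NougatProcessor.from_pretrained("facebook/nougat-base")  # assumed checkpoint

page = Image.new("RGB", (700, 1000))  # placeholder for a scanned document page
markdown = "# A section title\n\nSome transcribed markdown."

batch = processor(images=page, text=markdown, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 896, 672])
print(batch["labels"].shape)        # tokenized `markdown`, usable as decoder labels
```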
class NougatImageProcessor(BaseImageProcessor): r""" Constructs a Nougat image processor. Args: do_crop_margin (`bool`, *optional*, defaults to `True`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Image standard deviation. 
""" model_input_names = ["pixel_values"] def __init__( self, do_crop_margin: bool = True, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_thumbnail: bool = True, do_align_long_axis: bool = False, do_pad: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 896, "width": 672} size = get_size_dict(size) self.do_crop_margin = do_crop_margin self.do_resize = do_resize self.size = size self.resample = resample self.do_thumbnail = do_thumbnail self.do_align_long_axis = do_align_long_axis self.do_pad = do_pad self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def python_find_non_zero(self, image: np.array): """This is a reimplementation of a findNonZero function equivalent to cv2.""" non_zero_indices = np.column_stack(np.nonzero(image)) idxvec = non_zero_indices[:, [1, 0]] idxvec = idxvec.reshape(-1, 1, 2) return idxvec def python_bounding_rect(self, coordinates): """This is a reimplementation of a BoundingRect function equivalent to cv2.""" min_values = np.min(coordinates, axis=(0, 1)).astype(int) max_values = np.max(coordinates, axis=(0, 1)).astype(int) x_min, y_min = min_values[0], min_values[1] width = max_values[0] - x_min + 1 height = max_values[1] - y_min + 1 return x_min, y_min, width, height def crop_margin( self, image: np.array, gray_threshold: int = 200, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). Args: image (`np.array`): The image to be cropped. gray_threshold (`int`, *optional*, defaults to `200`) Value below which pixels are considered to be gray. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. 
""" if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = to_pil_image(image, input_data_format=input_data_format) data = np.array(image.convert("L")).astype(np.uint8) max_val = data.max() min_val = data.min() if max_val == min_val: image = np.array(image) image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image data = (data - min_val) / (max_val - min_val) * 255 gray = data < gray_threshold coords = self.python_find_non_zero(gray) x_min, y_min, width, height = self.python_bounding_rect(coords) image = image.crop((x_min, y_min, x_min + width, y_min + height)) image = np.array(image).astype(np.uint8) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.align_long_axis def align_long_axis( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Align the long axis of the image to the longest axis of the specified size. Args: image (`np.ndarray`): The image to be aligned. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The aligned image. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] if (output_width < output_height and input_width > input_height) or ( output_width > output_height and input_width < input_height ): image = np.rot90(image, 3) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def pad_image( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad the image to the specified size at the top, bottom, left and right. Args: image (`np.ndarray`): The image to be padded. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" output_height, output_width = size["height"], size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) delta_width = output_width - input_width delta_height = output_height - input_height pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = ((pad_top, pad_bottom), (pad_left, pad_right)) return pad(image, padding, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.thumbnail def thumbnail( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`np.ndarray`): The image to be resized. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] # We always resize to the smallest of either the input or output size. height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return resize( image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size) shortest_edge = min(size["height"], size["width"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) resized_image = resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return resized_image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_crop_margin: bool = None, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_thumbnail: bool = None, do_align_long_axis: bool = None, do_pad: bool = None, do_rescale: bool = None, rescale_factor: Union[int, float] = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], size["width"]) with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_crop_margin: images = [self.crop_margin(image, input_data_format=input_data_format) for image in images] if do_align_long_axis: images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_thumbnail: images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] if do_pad: images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
1,549
23,701
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/nougat/image_processing_nougat.py
null
8,564
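A standalone sketch of the default preprocessing pipeline above (crop margins, resize, thumbnail, pad): whatever the input resolution, the output is padded to the configured 896x672 size. The synthetic white page is a placeholder.

```python
import numpy as np
from transformers import NougatImageProcessor

image_processor = NougatImageProcessor()  # defaults: size={"height": 896, "width": 672}

page = np.full((1000, 700, 3), 255, dtype=np.uint8)  # synthetic white "page"

batch = image_processor(images=page, return_tensors="pt")
# crop_margin / resize / thumbnail / pad always end at the configured size.
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 896, 672])
```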
class Swinv2Config(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2 [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. pretrained_window_sizes (`list(int)`, *optional*, defaults to `[0, 0, 0, 0]`): Size of windows during pretraining. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. encoder_stride (`int`, *optional*, defaults to 32): Factor to increase the spatial resolution by in the decoder head for masked image modeling. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. 
Example: ```python >>> from transformers import Swinv2Config, Swinv2Model >>> # Initializing a Swinv2 microsoft/swinv2-tiny-patch4-window8-256 style configuration >>> configuration = Swinv2Config() >>> # Initializing a model (with random weights) from the microsoft/swinv2-tiny-patch4-window8-256 style configuration >>> model = Swinv2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "swinv2" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, pretrained_window_sizes=[0, 0, 0, 0], mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.pretrained_window_sizes = pretrained_window_sizes self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.encoder_stride = encoder_stride self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
class_definition
895
7,517
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/configuration_swinv2.py
null
8,565
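The constructor above derives a few convenience attributes from `embed_dim` and `depths`. A minimal sketch (not part of the source file) that checks them against the defaults:

```python
# Sketch: derived Swinv2Config attributes, assuming the defaults shown above.
from transformers import Swinv2Config

config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
print(config.num_layers)    # 4, one entry per element of `depths`
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.hidden_size)   # 96 * 2 ** 3 = 768, the channel dim after the last stage
```

As the in-code comment notes, `hidden_size` exists mainly so the backbone can be dropped into `VisionEncoderDecoderModel`.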
class Swinv2EncoderOutput(ModelOutput): """ Swinv2 encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
2,124
4,093
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,566
class Swinv2ModelOutput(ModelOutput): """ Swinv2 model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
4,194
6,427
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,567
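Both Swinv2 output classes carry the extra `reshaped_hidden_states` field. A hedged sketch that probes the two layouts with randomly initialized weights (no checkpoint assumed):

```python
# Sketch: sequence layout vs. spatial layout of the hidden states.
import torch
from transformers import Swinv2Config, Swinv2Model

model = Swinv2Model(Swinv2Config())  # random weights are enough to inspect shapes
model.eval()

with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224), output_hidden_states=True)

print(outputs.hidden_states[0].shape)           # (batch, seq_len, hidden), e.g. (1, 3136, 96)
print(outputs.reshaped_hidden_states[0].shape)  # (batch, hidden, height, width), e.g. (1, 96, 56, 56)
print(outputs.pooler_output.shape)              # (batch, hidden_size): average-pooled final features
```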
class Swinv2MaskedImageModelingOutput(ModelOutput): """ Swinv2 masked image model outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Masked image modeling (MLM) loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed pixel values. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @property def logits(self): warnings.warn( "logits attribute is deprecated and will be removed in version 5 of Transformers." " Please use the reconstruction attribute to retrieve the final output instead.", FutureWarning, ) return self.reconstruction
class_definition
6,542
8,957
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,568
class Swinv2ImageClassifierOutput(ModelOutput): """ Swinv2 outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
9,068
11,208
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,569
class Swinv2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
13,456
13,936
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,570
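`Swinv2DropPath` delegates to a module-level `drop_path` helper defined elsewhere in the same file and not included in this record. A sketch of the conventional (timm-style) formulation it follows:

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity at inference time or when the rate is zero.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    # Scale kept samples so the expectation matches the identity path.
    return input.div(keep_prob) * random_tensor
```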
class Swinv2Embeddings(nn.Module): """ Construct the patch and position embeddings. Optionally, also the mask token. """ def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = Swinv2PatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> Tuple[torch.Tensor]: _, num_channels, height, width = pixel_values.shape embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions
class_definition
14,025
17,938
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,571
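The bicubic resizing done by `interpolate_pos_encoding` can be reproduced with standalone tensors. A sketch with illustrative grid sizes (not the module itself):

```python
import torch
import torch.nn as nn

dim, old_grid, new_grid = 96, 56, 64
position_embeddings = torch.randn(1, old_grid * old_grid + 1, dim)  # one extra "class" position

class_pos = position_embeddings[:, :1]
patch_pos = position_embeddings[:, 1:]
patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)

print(torch.cat((class_pos, patch_pos), dim=1).shape)  # (1, new_grid * new_grid + 1, dim)
```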
class Swinv2PatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions
class_definition
18,032
20,214
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,572
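A quick shape check, with illustrative sizes, of the strided convolution that turns an image into the patch sequence:

```python
import torch
import torch.nn as nn

embed_dim, patch_size = 96, 4
projection = nn.Conv2d(3, embed_dim, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(1, 3, 256, 256)
patches = projection(pixel_values)               # (1, 96, 64, 64): one column per 4x4 patch
embeddings = patches.flatten(2).transpose(1, 2)  # (1, 4096, 96): (batch, seq_len, hidden)
print(embeddings.shape)
```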
class Swinv2PatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(2 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input so that height and width are divisible by 2, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # [batch_size, height/2 * width/2, 4*num_channels] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*num_channels] input_feature = self.reduction(input_feature) input_feature = self.norm(input_feature) return input_feature
class_definition
20,217
22,509
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,573
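The 2x2 neighbourhood gathering in `forward` is easiest to see on toy tensors. A sketch with illustrative dimensions:

```python
import torch

batch_size, height, width, channels = 1, 8, 8, 96
input_feature = torch.randn(batch_size, height * width, channels).view(batch_size, height, width, channels)

# stack the four members of every 2x2 block along the channel axis
merged = torch.cat(
    [
        input_feature[:, 0::2, 0::2, :],
        input_feature[:, 1::2, 0::2, :],
        input_feature[:, 0::2, 1::2, :],
        input_feature[:, 1::2, 1::2, :],
    ],
    dim=-1,
)
merged = merged.view(batch_size, -1, 4 * channels)
print(merged.shape)  # (1, 16, 384); the nn.Linear reduction then maps 4*channels -> 2*channels
```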
class Swinv2SelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.pretrained_window_size = pretrained_window_size self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.continuous_position_bias_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float() relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float() relative_coords_table = ( torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij")) .permute(1, 2, 0) .contiguous() .unsqueeze(0) ) # [1, 2*window_height - 1, 2*window_width - 1, 2] if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1 relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1 elif window_size > 1: relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = ( torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8) ) # set to same dtype as mlp weight relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype) self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape mixed_query_layer = 
self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # cosine attention attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize( key_layer, dim=-1 ).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp() attention_scores = attention_scores * logit_scale relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view( -1, self.num_attention_heads ) # [window_height*window_width,window_height*window_width,num_attention_heads] relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # [num_attention_heads,window_height*window_width,window_height*window_width] relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in Swinv2Model forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class_definition
22,512
29,493
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,574
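The part that distinguishes Swin v2 attention is cosine similarity between queries and keys, scaled by a clamped learnable temperature, rather than scaled dot products. A standalone sketch with illustrative sizes and names:

```python
import math
import torch
import torch.nn as nn

num_heads, window_len, head_dim = 3, 49, 32  # e.g. a 7x7 window
query = torch.randn(1, num_heads, window_len, head_dim)
key = torch.randn(1, num_heads, window_len, head_dim)
logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads, 1, 1)))

# cosine attention: normalize q and k, then scale by a clamped temperature
attention_scores = nn.functional.normalize(query, dim=-1) @ nn.functional.normalize(key, dim=-1).transpose(-2, -1)
attention_scores = attention_scores * torch.clamp(logit_scale, max=math.log(1.0 / 0.01)).exp()
print(attention_scores.shape)  # (1, num_heads, window_len, window_len)
```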
class Swinv2SelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class_definition
29,582
30,021
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,575
class Swinv2Attention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0): super().__init__() self.self = Swinv2SelfAttention( config=config, dim=dim, num_heads=num_heads, window_size=window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.output = Swinv2SelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
30,024
32,027
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,576
class Swinv2Intermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
32,118
32,678
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,577
class Swinv2Output(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class_definition
32,763
33,184
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,578
class Swinv2Layer(nn.Module): def __init__( self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0 ): super().__init__() self.input_resolution = input_resolution window_size, shift_size = self._compute_window_shift( (config.window_size, config.window_size), (shift_size, shift_size) ) self.window_size = window_size[0] self.shift_size = shift_size[0] self.attention = Swinv2Attention( config=config, dim=dim, num_heads=num_heads, window_size=self.window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.drop_path = Swinv2DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.intermediate = Swinv2Intermediate(config, dim) self.output = Swinv2Output(config, dim) self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) def _compute_window_shift(self, target_window_size, target_shift_size) -> Tuple[Tuple[int, int], Tuple[int, int]]: window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return window_size, shift_size def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for shifted window multihead self attention img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states # pad hidden_states to multiples of window size hidden_states = hidden_states.view(batch_size, height, width, channels) hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size 
* self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = self.layernorm_before(attention_windows) hidden_states = shortcut + self.drop_path(hidden_states) layer_output = self.intermediate(hidden_states) layer_output = self.output(layer_output) layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output)) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs
class_definition
33,187
39,128
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,579
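`Swinv2Layer` calls two module-level helpers, `window_partition` and `window_reverse`, which live elsewhere in the same file and are not part of this record. A sketch of their conventional implementations, for context:

```python
import torch

def window_partition(input_feature: torch.Tensor, window_size: int) -> torch.Tensor:
    """Split a (batch, height, width, channels) map into non-overlapping windows."""
    batch_size, height, width, num_channels = input_feature.shape
    input_feature = input_feature.view(
        batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
    )
    return input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)

def window_reverse(windows: torch.Tensor, window_size: int, height: int, width: int) -> torch.Tensor:
    """Merge windows back into a (batch, height, width, channels) map."""
    num_channels = windows.shape[-1]
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    return windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
```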
class Swinv2Stage(nn.Module): def __init__( self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0 ): super().__init__() self.config = config self.dim = dim blocks = [] for i in range(depth): block = Swinv2Layer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if (i % 2 == 0) else config.window_size // 2, pretrained_window_size=pretrained_window_size, ) blocks.append(block) self.blocks = nn.ModuleList(blocks) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs
class_definition
39,131
41,424
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,580
class Swinv2Encoder(nn.Module): def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)): super().__init__() self.num_layers = len(config.depths) self.config = config if self.config.pretrained_window_sizes is not None: pretrained_window_sizes = config.pretrained_window_sizes dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] layers = [] for i_layer in range(self.num_layers): stage = Swinv2Stage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=Swinv2PatchMerging if (i_layer < self.num_layers - 1) else None, pretrained_window_size=pretrained_window_sizes[i_layer], ) layers.append(stage) self.layers = nn.ModuleList(layers) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, Swinv2EncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, input_dimensions, layer_head_mask ) else: layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, 
all_self_attentions, all_reshaped_hidden_states] if v is not None ) return Swinv2EncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, )
class_definition
41,427
46,347
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,581
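The encoder builds a stochastic depth schedule that rises linearly over all blocks and is then sliced per stage. A sketch with the default `depths` and `drop_path_rate`:

```python
import torch

depths, drop_path_rate = [2, 2, 6, 2], 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
per_stage = [dpr[sum(depths[:i]) : sum(depths[: i + 1])] for i in range(len(depths))]
for i, rates in enumerate(per_stage):
    print(f"stage {i + 1}: {[round(r, 3) for r in rates]}")  # deeper blocks get larger drop rates
```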
class Swinv2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Swinv2Config base_model_prefix = "swinv2" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["Swinv2Stage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
46,454
47,413
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,582
class Swinv2Model(Swinv2PreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = Swinv2Embeddings(config, use_mask_token=use_mask_token) self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Swinv2ModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, Swinv2ModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return Swinv2ModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, )
class_definition
49,563
53,744
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,583
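A hedged end-to-end usage sketch for `Swinv2Model`, reusing the checkpoint name already referenced in the configuration docstring (network access assumed):

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Model

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_size), e.g. (1, 64, 768)
print(outputs.pooler_output.shape)      # (batch, hidden_size)
```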
class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.swinv2 = Swinv2Model(config, add_pooling_layer=False, use_mask_token=True) num_features = int(config.embed_dim * 2 ** (config.num_layers - 1)) self.decoder = nn.Sequential( nn.Conv2d( in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1 ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Swinv2MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, Swinv2MaskedImageModelingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 256, 256] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swinv2( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] # Reshape to (batch_size, num_channels, height, width) sequence_output = sequence_output.transpose(1, 2) batch_size, num_channels, sequence_length = sequence_output.shape height = width = math.floor(sequence_length**0.5) sequence_output = sequence_output.reshape(batch_size, num_channels, height, width) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = ( bool_masked_pos.repeat_interleave(self.config.patch_size, 1) .repeat_interleave(self.config.patch_size, 2) .unsqueeze(1) .contiguous() ) reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none") masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels if not return_dict: output = 
(reconstructed_pixel_values,) + outputs[2:] return ((masked_im_loss,) + output) if masked_im_loss is not None else output return Swinv2MaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, )
class_definition
54,340
59,064
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,584
class Swinv2ForImageClassification(Swinv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.swinv2 = Swinv2Model(config) # Classifier head self.classifier = ( nn.Linear(self.swinv2.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=Swinv2ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, Swinv2ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swinv2( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return Swinv2ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, )
class_definition
59,742
63,385
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,585
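A hedged usage sketch for the classification head, assuming the `microsoft/swinv2-tiny-patch4-window8-256` checkpoint ships ImageNet classification weights:

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])
```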
class Swinv2Backbone(Swinv2PreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] self.embeddings = Swinv2Embeddings(config) self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid) # initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> model = AutoBackbone.from_pretrained( ... "microsoft/swinv2-tiny-patch4-window8-256", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 2048, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output, input_dimensions = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, input_dimensions, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=return_dict, ) hidden_states = outputs.reshaped_hidden_states if return_dict else outputs[-1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs[1],) if output_attentions: output += (outputs[2],) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
class_definition
63,532
66,847
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swinv2/modeling_swinv2.py
null
8,586
class RegNetConvLayer(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", ): super().__init__() self.convolution = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, ) self.normalization = nn.BatchNorm2d(out_channels) self.activation = ACT2FN[activation] if activation is not None else nn.Identity() def forward(self, hidden_state): hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state
class_definition
1,625
2,530
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,587
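A small sketch of the conv–BatchNorm–activation block above. The import path is taken from the filepath in this record and is an internal module, not part of the public API:

```python
import torch
from transformers.models.regnet.modeling_regnet import RegNetConvLayer

layer = RegNetConvLayer(in_channels=3, out_channels=32, kernel_size=3, stride=2, activation="relu")
layer.eval()  # BatchNorm uses its running statistics
print(layer(torch.randn(1, 3, 224, 224)).shape)  # (1, 32, 112, 112): padding=kernel_size//2, then stride 2
```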
class RegNetEmbeddings(nn.Module): """ RegNet Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: RegNetConfig): super().__init__() self.embedder = RegNetConvLayer( config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act ) self.num_channels = config.num_channels def forward(self, pixel_values): num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values matches the one set in the configuration." ) hidden_state = self.embedder(pixel_values) return hidden_state
class_definition
2,533
3,313
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,588
class RegNetShortCut(nn.Module): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2): super().__init__() self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False) self.normalization = nn.BatchNorm2d(out_channels) def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) return hidden_state
class_definition
3,408
4,059
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,589
class RegNetSELayer(nn.Module): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). """ def __init__(self, in_channels: int, reduced_channels: int): super().__init__() self.pooler = nn.AdaptiveAvgPool2d((1, 1)) self.attention = nn.Sequential( nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), ) def forward(self, hidden_state): # b c h w -> b c 1 1 pooled = self.pooler(hidden_state) attention = self.attention(pooled) hidden_state = hidden_state * attention return hidden_state
class_definition
4,062
4,839
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,590
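A standalone sketch of the squeeze-and-excitation gating (illustrative names): global-average-pool to `1x1`, squeeze through a bottleneck of `1x1` convolutions, then rescale each channel of the input:

```python
import torch
from torch import nn

in_channels, reduced_channels = 64, 16
pooler = nn.AdaptiveAvgPool2d((1, 1))
attention = nn.Sequential(
    nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
    nn.ReLU(),
    nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
    nn.Sigmoid(),
)

hidden_state = torch.randn(1, in_channels, 14, 14)
scale = attention(pooler(hidden_state))  # shape (1, 64, 1, 1), values in (0, 1)
print((hidden_state * scale).shape)      # channel-wise reweighting keeps the input shape
```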
class RegNetXLayer(nn.Module): """ RegNet's X layer, composed of a `1x1` convolution, a grouped `3x3` convolution and a `1x1` convolution, same as a ResNet bottleneck layer with reduction = 1. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state
class_definition
4,842
6,092
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,591
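The grouped `3x3` convolution in this layer gets its group count from `config.groups_width`: each group handles `groups_width` channels. A plain arithmetic sketch, with values taken from the config record further below:

```python
# Each group processes `groups_width` channels, so the number of groups is
# out_channels // groups_width (clamped to at least 1).
out_channels, groups_width = 1088, 64
groups = max(1, out_channels // groups_width)
print(groups)                      # 17 groups of 64 channels each
print(out_channels % groups == 0)  # True: RegNet widths are chosen so the split is even
```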
class RegNetYLayer(nn.Module): """ RegNet's Y layer: an X layer with Squeeze and Excitation. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state
class_definition
6,095
7,383
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,592
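A quick shape check of a Y layer, assuming `transformers` is installed; `RegNetYLayer` is in a private module, so the import path may change between versions:

```python
import torch
from transformers import RegNetConfig
from transformers.models.regnet.modeling_regnet import RegNetYLayer

config = RegNetConfig()
layer = RegNetYLayer(config, in_channels=32, out_channels=64, stride=2)

x = torch.randn(1, 32, 56, 56)
print(layer(x).shape)  # torch.Size([1, 64, 28, 28]): stride-2 layer halves H/W and projects channels
```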
class RegNetStage(nn.Module): """ A RegNet stage composed of stacked layers. """ def __init__( self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ): super().__init__() layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer self.layers = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], ) def forward(self, hidden_state): hidden_state = self.layers(hidden_state) return hidden_state
class_definition
7,386
8,219
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,593
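A stage-level shape check (same caveat about the private import path): only the first layer downsamples, the remaining `depth - 1` layers keep the resolution:

```python
import torch
from transformers import RegNetConfig
from transformers.models.regnet.modeling_regnet import RegNetStage

config = RegNetConfig()
stage = RegNetStage(config, in_channels=64, out_channels=128, stride=2, depth=3)

x = torch.randn(1, 64, 32, 32)
print(stage(x).shape)  # torch.Size([1, 128, 16, 16])
```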
class RegNetEncoder(nn.Module): def __init__(self, config: RegNetConfig): super().__init__() self.stages = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]): self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth)) def forward( self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class_definition
8,222
9,763
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,594
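An encoder-level sketch with a deliberately tiny configuration (hypothetical sizes, chosen only to keep the example fast; private import path as above). With `output_hidden_states=True`, the returned tuple holds the stem features plus one entry per stage:

```python
import torch
from transformers import RegNetConfig
from transformers.models.regnet.modeling_regnet import RegNetEncoder

config = RegNetConfig(depths=[1, 1, 1, 1], hidden_sizes=[64, 128, 256, 512])
encoder = RegNetEncoder(config)

stem_features = torch.randn(1, config.embedding_size, 56, 56)
outputs = encoder(stem_features, output_hidden_states=True, return_dict=True)

print(len(outputs.hidden_states))       # 5 = stem features + 4 stage outputs
print(outputs.last_hidden_state.shape)  # channels equal hidden_sizes[-1]
```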
class RegNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RegNetConfig base_model_prefix = "regnet" main_input_name = "pixel_values" _no_split_modules = ["RegNetYLayer"] # Copied from transformers.models.resnet.modeling_resnet.ResNetPreTrainedModel._init_weights def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") # copied from the `reset_parameters` method of `class Linear(Module)` in `torch`. elif isinstance(module, nn.Linear): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0)
class_definition
9,766
10,975
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,595
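The initialisation scheme above mirrors torch's own `Linear.reset_parameters`; a standalone illustration (applied to a fresh layer rather than through the library's `post_init`), reusing the same private fan-in helper the code above calls:

```python
import math
import torch
from torch import nn

linear = nn.Linear(512, 10)

# Kaiming-uniform weight init (a=sqrt(5)), uniform(-bound, bound) bias init.
nn.init.kaiming_uniform_(linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(linear.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(linear.bias, -bound, bound)

print(bound)                                   # 1 / sqrt(512)
print(bool(linear.bias.abs().max() <= bound))  # True
```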
class RegNetModel(RegNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = RegNetEmbeddings(config) self.encoder = RegNetEncoder(config) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
12,464
14,204
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,596
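An end-to-end shape check with a randomly initialised model so no checkpoint download is needed; swap in `RegNetModel.from_pretrained(...)` with a published checkpoint for pretrained weights:

```python
import torch
from transformers import RegNetConfig, RegNetModel

model = RegNetModel(RegNetConfig()).eval()
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values, output_hidden_states=True)

print(outputs.pooler_output.shape)  # (1, hidden_sizes[-1], 1, 1) after adaptive average pooling
print(len(outputs.hidden_states))   # stem features plus one entry per stage
```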
class RegNetForImageClassification(RegNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.regnet = RegNetModel(config) # classification head self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
class_definition
14,542
17,686
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/modeling_regnet.py
null
8,597
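A classification sketch with a randomly initialised head (hypothetical 10-class setup); passing `labels` makes the forward return a cross-entropy loss alongside the logits:

```python
import torch
from transformers import RegNetConfig, RegNetForImageClassification

config = RegNetConfig(num_labels=10)
model = RegNetForImageClassification(config).eval()

pixel_values = torch.randn(2, 3, 224, 224)
labels = torch.tensor([3, 7])  # integer labels -> single_label_classification path

outputs = model(pixel_values, labels=labels)
print(outputs.logits.shape)  # torch.Size([2, 10])
print(outputs.loss)          # scalar cross-entropy loss
```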
class RegNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RegNet [facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. embedding_size (`int`, *optional*, defaults to 32): Dimensionality (hidden size) for the embedding layer. hidden_sizes (`List[int]`, *optional*, defaults to `[128, 192, 512, 1088]`): Dimensionality (hidden size) at each stage. depths (`List[int]`, *optional*, defaults to `[2, 6, 12, 2]`): Depth (number of layers) for each stage. groups_width (`int`, *optional*, defaults to 64): Width (number of channels) of each group in the grouped `3x3` convolutions. layer_type (`str`, *optional*, defaults to `"y"`): The layer to use, it can be either `"x"` or `"y"`. An `x` layer is a ResNet's bottleneck layer with `reduction` fixed to `1`, while a `y` layer is an `x` layer with squeeze and excitation. Please refer to the paper for a detailed explanation of how these layers were constructed. hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. Note that `downsample_in_first_stage` is not a constructor argument in this version; the attribute is always set to `True`. Example: ```python >>> from transformers import RegNetConfig, RegNetModel >>> # Initializing a RegNet regnet-y-040 style configuration >>> configuration = RegNetConfig() >>> # Initializing a model from the regnet-y-040 style configuration >>> model = RegNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "regnet" layer_types = ["x", "y"] def __init__( self, num_channels=3, embedding_size=32, hidden_sizes=[128, 192, 512, 1088], depths=[2, 6, 12, 2], groups_width=64, layer_type="y", hidden_act="relu", **kwargs, ): super().__init__(**kwargs) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.groups_width = groups_width self.layer_type = layer_type self.hidden_act = hidden_act # always downsample in the first stage self.downsample_in_first_stage = True
class_definition
808
3,944
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/configuration_regnet.py
null
8,598
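Beyond the defaults, the configuration can describe a custom variant by overriding the architecture hyper-parameters (the values below are illustrative, not a published RegNet):

```python
from transformers import RegNetConfig, RegNetModel

config = RegNetConfig(
    embedding_size=32,
    hidden_sizes=[64, 128, 256, 512],
    depths=[1, 2, 4, 1],
    groups_width=32,
    layer_type="x",  # plain bottleneck layers, no squeeze-and-excitation
)
model = RegNetModel(config)
print(round(sum(p.numel() for p in model.parameters()) / 1e6, 2), "M parameters")
```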
class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: self.traced.append(m) def __call__(self, x: Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
class_definition
1,329
2,173
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py
null
8,599
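The `Tracker` above relies on `field(default_factory=...)`, so in the conversion script it is decorated with `@dataclass` (with `dataclass`, `field`, `List`, `Tensor` and `nn` imported). A rough usage sketch, assuming that decorated class is in scope:

```python
import torch
from torch import nn

# Any nn.Module works; a tiny throwaway model keeps the example fast.
demo = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.BatchNorm2d(8), nn.ReLU())

# Calling the tracker runs a forward pass and records every leaf module it hits.
tracker = Tracker(demo)(torch.randn(1, 3, 32, 32))

print(len(tracker.traced))        # 3: the conv, the batch norm and the ReLU
print(len(tracker.parametrized))  # 2: only the modules with entries in their state_dict
```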