Dataset columns:
text: string, lengths 31 to 243k
type: string, 1 distinct value
start: int64, 36 to 275k
end: int64, 286 to 280k
depth: int64, 0 to 1
filepath: string, lengths 85 to 188
parent_class: string, 3 distinct values
class_index: int64, 0 to 10.8k
class AriaGroupedExpertsGemm(nn.Module):
    """
    Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
    This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
    for optimized performance. If the grouped_gemm library is not installed, it gracefully
    falls back to a sequential GEMM implementation, which may be slower but ensures
    functionality.

    Args:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        groups (`int`):
            Number of expert groups.
    """

    def __init__(self, in_features, out_features, groups):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.groups = groups
        self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))

    def forward(self, input, tokens_per_expert):
        """
        Perform grouped matrix multiplication.

        Args:
            input (`torch.Tensor`):
                Input tensor of shape (num_tokens, in_features).
            tokens_per_expert (`torch.Tensor`):
                Number of tokens assigned to each expert.

        Returns:
            torch.Tensor: Output tensor of shape (num_tokens, out_features).
        """
        return sequential_experts_gemm(
            input,
            self.weight,
            tokens_per_expert.cpu(),
        )
class_definition
11,046
12,537
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,200
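The `forward` of `AriaGroupedExpertsGemm` delegates to a `sequential_experts_gemm` helper that is not part of this chunk. Below is a minimal sketch of what such a sequential fallback could look like, assuming tokens are already sorted by expert and `tokens_per_expert` holds per-expert counts on CPU; the function name and call signature come from the call site above, but the body is an illustration, not the library implementation.

```python
import torch


def sequential_experts_gemm_sketch(token_states, expert_weights, tokens_per_expert):
    # token_states: (num_tokens, in_features), grouped so tokens for expert 0 come
    # first, then expert 1, and so on.
    # expert_weights: (num_experts, in_features, out_features)
    # tokens_per_expert: (num_experts,) token counts per expert, on CPU.
    num_tokens = token_states.shape[0]
    out_features = expert_weights.shape[-1]
    output = token_states.new_empty((num_tokens, out_features))

    # Derive each expert's slice boundaries from the cumulative token counts.
    offsets = torch.cumsum(tokens_per_expert, dim=0).tolist()
    start = 0
    for expert_idx, end in enumerate(offsets):
        if end > start:  # skip experts that received no tokens
            output[start:end] = token_states[start:end] @ expert_weights[expert_idx]
        start = end
    return output


# Toy usage: 6 tokens routed to 3 experts with counts [3, 1, 2].
tokens = torch.randn(6, 8)
weights = torch.randn(3, 8, 16)
counts = torch.tensor([3, 1, 2])
print(sequential_experts_gemm_sketch(tokens, weights, counts).shape)  # torch.Size([6, 16])
```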
class AriaGroupedExpertsMLP(nn.Module):
    """
    Grouped MLP module for Mixture of Experts.

    Args:
        config (`AriaTextConfig`):
            Configuration object for the model.
    """

    def __init__(self, config: AriaTextConfig) -> None:
        super().__init__()
        self.config = config
        self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
        self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)

    def forward(self, permuted_tokens, tokens_per_expert):
        """
        Forward pass of the Grouped MLP.

        Args:
            permuted_tokens (torch.Tensor): Permuted input tokens.
            tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.

        Returns:
            torch.Tensor: Output tensor after passing through the MLP.
        """
        fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
        projection, gate = torch.chunk(fc1_output, 2, dim=-1)
        fc1_output = nn.functional.silu(projection) * gate
        fc2_output = self.fc2(fc1_output, tokens_per_expert)
        return fc2_output
class_definition
12,540
13,735
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,201
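In `AriaGroupedExpertsMLP`, `fc1` produces `2 * intermediate_size` channels that are split into a projection half and a gate half; SiLU is applied to the projection and multiplied by the gate before `fc2`. A standalone illustration of that gating step on a toy tensor:

```python
import torch
from torch import nn

num_tokens, intermediate_size = 4, 16
fc1_output = torch.randn(num_tokens, 2 * intermediate_size)

# Split the fused fc1 output into the two halves used by the gated activation.
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
gated = nn.functional.silu(projection) * gate  # (num_tokens, intermediate_size)
print(gated.shape)  # torch.Size([4, 16])
```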
class AriaTextMoELayer(nn.Module): """ Aria Text Mixture of Experts (MoE) Layer. This layer applies a gating mechanism to route input tokens to different experts. Args: config (`AriaTextConfig`): Configuration object for the text component of the model. """ def __init__(self, config: AriaTextConfig): super().__init__() self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False) self.experts = AriaGroupedExpertsMLP(config) self.shared_experts = AriaSharedExpertsMLP(config) self.config = config def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ Forward pass of the MoE Layer. Args: hidden_states (`torch.Tensor`): Input tensor of shape (batch_size, sequence_length, hidden_size). Returns: torch.Tensor: Output tensor after passing through the MoE layer. Process: 1. Route tokens to experts using the router. 2. Permute tokens based on routing decisions. 3. Process tokens through experts. 4. Unpermute and combine expert outputs. 5. Add shared expert output to the final result. """ original_shape = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_states.size(-1)) # Top K Routing logits = self.router(hidden_states) top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1) scores = nn.functional.softmax(top_logits, dim=-1) original_dtype = top_indices.dtype tokens_per_expert = torch.histc( top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1, ).to(original_dtype) indices = top_indices # Token permutation flatten_indices = indices.view(-1) sorted_indices = torch.argsort(flatten_indices) permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk) # Process through experts expert_output = self.experts(permuted_tokens, tokens_per_expert) # Token unpermutation unpermuted_tokens = torch.zeros( (scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device, ) unpermuted_tokens.index_copy_(0, sorted_indices, expert_output) unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1)) output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape) # Add shared expert output shared_expert_output = self.shared_experts(hidden_states.view(original_shape)) return output + shared_expert_output
class_definition
13,915
16,806
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,202
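The routing bookkeeping in `AriaTextMoELayer.forward` (top-k selection, counting tokens per expert, sorting tokens so each expert's tokens are contiguous, then scattering expert outputs back and weighting by the softmaxed router scores) is easiest to follow on small dimensions. The sketch below reproduces those steps with an identity stand-in for the experts, so the permute/unpermute round-trip can be verified directly; it is an illustration, not the module itself.

```python
import torch
from torch import nn

torch.manual_seed(0)
num_tokens, hidden_size, num_experts, topk = 4, 6, 4, 2
hidden_states = torch.randn(num_tokens, hidden_size)
router = nn.Linear(hidden_size, num_experts, bias=False)

# 1. Top-k routing: each token picks `topk` experts; their logits are softmax-normalized.
logits = router(hidden_states)
top_logits, top_indices = torch.topk(logits, k=topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)  # (num_tokens, topk)

# 2. Count tokens per expert (histc requires float input) and sort the flattened
#    (token, slot) assignments so tokens for the same expert become contiguous.
tokens_per_expert = torch.histc(
    top_indices.flatten().float(), bins=num_experts, min=0, max=num_experts - 1
).long()  # this is what would be handed to the grouped experts
sorted_indices = torch.argsort(top_indices.view(-1))
permuted_tokens = hidden_states.index_select(0, sorted_indices // topk)

# 3. Identity stand-in for AriaGroupedExpertsMLP, to make the round-trip checkable.
expert_output = permuted_tokens.clone()

# 4. Unpermute: scatter expert outputs back to (token, slot) order, then combine the
#    topk slots of each token with its routing scores.
unpermuted = torch.zeros_like(expert_output)
unpermuted.index_copy_(0, sorted_indices, expert_output)
unpermuted = unpermuted.view(num_tokens, topk, hidden_size)
combined = (unpermuted * scores.unsqueeze(-1)).sum(dim=1)

# With identity experts the combined output equals the input (scores sum to 1 per token).
print(torch.allclose(combined, hidden_states, atol=1e-6))  # True
```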
class AriaTextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: AriaTextConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
class_definition
20,085
23,658
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,203
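`AriaTextAttention` dispatches to `eager_attention_forward` (or another entry in `ALL_ATTENTION_FUNCTIONS`), which is defined elsewhere in the file. As a rough sketch, and assuming it follows the usual Llama-style pattern, eager attention with grouped key/value heads looks like this; the function below is an illustration under that assumption, not the actual helper.

```python
import torch
from torch import nn


def eager_attention_sketch(query, key, value, attention_mask, scaling, num_key_value_groups):
    # query: (batch, num_heads, seq, head_dim); key/value: (batch, num_kv_heads, kv_seq, head_dim)
    # Repeat each key/value head so every query head in a group attends to the same KV head.
    key = key.repeat_interleave(num_key_value_groups, dim=1)
    value = value.repeat_interleave(num_key_value_groups, dim=1)

    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask[:, :, :, : key.shape[-2]]
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_output = torch.matmul(attn_weights, value)
    return attn_output.transpose(1, 2), attn_weights  # (batch, seq, num_heads, head_dim)


batch, heads, kv_heads, seq, head_dim = 1, 8, 2, 5, 16
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, kv_heads, seq, head_dim)
v = torch.randn(batch, kv_heads, seq, head_dim)
out, _ = eager_attention_sketch(q, k, v, None, head_dim**-0.5, heads // kv_heads)
print(out.shape)  # torch.Size([1, 5, 8, 16])
```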
class AriaTextDecoderLayer(nn.Module): """ Aria Text Decoder Layer. This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network. Args: config (`AriaTextConfig`): Configuration object for the text component of the model. layer_idx (`int`): Index of the layer. """ def __init__(self, config: AriaTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx) self.mlp = AriaTextMoELayer(config) self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class_definition
23,661
26,124
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,204
class AriaTextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AriaConfig base_model_prefix = "model" _no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"] supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = False _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, AriaGroupedExpertsGemm): module.weight.data.normal_(mean=0.0, std=std) elif isinstance(module, nn.Conv2d): module.weight.data.normal_(mean=0.0, std=std) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_()
class_definition
26,127
27,446
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,205
class AriaPreTrainedModel(PreTrainedModel): config_class = AriaTextConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["AriaDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, AriaProjector): nn.init.trunc_normal_(module.query, std=std)
class_definition
28,478
29,507
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,206
class AriaTextRotaryEmbedding(nn.Module): def __init__(self, config: AriaTextConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class_definition
29,510
32,711
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,207
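The cos/sin tensors returned by `AriaTextRotaryEmbedding.forward` are consumed by `apply_rotary_pos_emb`, which is not included in this chunk. Assuming the standard rotate-half formulation shared by Llama-style models, the application step looks roughly like the sketch below (an assumption-labeled illustration, not the file's own helper).

```python
import torch


def rotate_half(x):
    # Split the head dimension in two and rotate: (x1, x2) -> (-x2, x1).
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb_sketch(q, k, cos, sin, unsqueeze_dim=1):
    # cos/sin: (batch, seq, head_dim); unsqueeze so they broadcast over the head axis.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


q = torch.randn(1, 8, 5, 16)  # (batch, heads, seq, head_dim)
k = torch.randn(1, 2, 5, 16)
cos = torch.randn(1, 5, 16)
sin = torch.randn(1, 5, 16)
q_rot, k_rot = apply_rotary_pos_emb_sketch(q, k, cos, sin)
print(q_rot.shape, k_rot.shape)  # torch.Size([1, 8, 5, 16]) torch.Size([1, 2, 5, 16])
```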
class AriaTextModel(AriaTextPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`AriaTextDecoderLayer`] Args: config: AriaTextConfig """ def __init__(self, config: AriaTextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = AriaTextRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(ARIA_TEXT_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **flash_attn_kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict else output.to_tuple() def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
class_definition
37,526
48,779
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,208
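The core of `_prepare_4d_causal_attention_mask_with_cache_position` in `AriaTextModel` is easier to see on toy sizes. The standalone sketch below reproduces its main steps (fill with the dtype minimum, keep the upper triangle masked, then unmask key positions at or before each cache position) for a 3-token query decoded after 2 cached tokens:

```python
import torch

sequence_length, target_length, batch_size = 3, 5, 1
cache_position = torch.arange(2, 2 + sequence_length)  # decoding after 2 cached tokens
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# Start fully masked, keep the strictly-upper triangle masked, then unmask every key
# position that is not in the future of the corresponding cache position.
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)

print((causal_mask == 0).int()[0, 0])
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]], dtype=torch.int32)
```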
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
class_definition
48,782
48,844
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,209
class AriaTextForCausalLM(AriaTextPreTrainedModel, GenerationMixin): """ Aria model for causal language modeling tasks. This class extends `LlamaForCausalLM` to incorporate the Mixture of Experts (MoE) approach, allowing for more efficient and scalable language modeling. Args: config (`AriaTextConfig`): Configuration object for the model. """ _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} config_class = AriaTextConfig def __init__(self, config: AriaTextConfig): super().__init__(config) self.model = AriaTextModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(ARIA_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> from transformers import AutoTokenizer, AriaTextForCausalLM >>> model = AriaTextForCausalLM.from_pretrained("meta-aria_text/AriaText-2-7b-hf") >>> tokenizer = AutoTokenizer.from_pretrained("meta-aria_text/AriaText-2-7b-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
48,847
54,380
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,210
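The `num_logits_to_keep` slicing in `AriaTextForCausalLM.forward`, `hidden_states[:, -num_logits_to_keep:, :]`, relies on the fact that `[-0:]` selects the whole sequence, which is why `0` means "compute logits for every position" while `1` keeps only the last token during generation. A small illustration:

```python
import torch

hidden_states = torch.randn(2, 7, 4)  # (batch, seq, hidden)

# num_logits_to_keep == 0: `-0` is 0, so the slice keeps every position.
print(hidden_states[:, -0:, :].shape)  # torch.Size([2, 7, 4])

# num_logits_to_keep == 1: keep only the last position, the usual generation case.
print(hidden_states[:, -1:, :].shape)  # torch.Size([2, 1, 4])
```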
class AriaCausalLMOutputWithPast(ModelOutput): """ Base class for Aria causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size (batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
class_definition
54,394
56,978
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,211
class AriaForConditionalGeneration(AriaPreTrainedModel, GenerationMixin): config_class = AriaConfig _supports_flash_attn_2 = False _supports_sdpa = False _tied_weights_keys = ["language_model.lm_head.weight"] def __init__(self, config: AriaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = AriaProjector(config) self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config(config.text_config) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self._use_flash_attention_2 = config.text_config._attn_implementation == "flash_attention_2" self.post_init() def _create_patch_attention_mask(self, pixel_mask): if pixel_mask is None: return None patches_subgrid = pixel_mask.unfold( dimension=1, size=self.vision_tower.config.patch_size, step=self.vision_tower.config.patch_size, ) patches_subgrid = patches_subgrid.unfold( dimension=2, size=self.vision_tower.config.patch_size, step=self.vision_tower.config.patch_size, ) return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def get_image_features( self, pixel_values: torch.FloatTensor, pixel_mask: torch.FloatTensor = None, vision_feature_layer: int = -1, ): patch_attention_mask = self._create_patch_attention_mask(pixel_mask) image_outputs = self.vision_tower( pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True ) image_attn_mask = None if patch_attention_mask is not None: flattened_mask = patch_attention_mask.flatten(1) image_attn_mask = torch.logical_not(flattened_mask) selected_image_feature = image_outputs.hidden_states[vision_feature_layer] image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask) return image_features @add_start_docstrings_to_model_forward(ARIA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AriaCausalLMOutputWithPast, config_class=AriaConfig) def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, pixel_mask: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, num_logits_to_keep: int = 0, cache_position: Optional[torch.LongTensor] = None, **loss_kwargs, ) -> Union[Tuple, AriaCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`). 
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from io import BytesIO >>> from transformers import AutoProcessor, AutoModel >>> from transformers.image_utils import load_image >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg") >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg") >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg") >>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria") >>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", torch_dtype=torch.bfloat16, device_map="auto") >>> # Create inputs >>> messages = [ ... { ... "role": "user", ... "content": [ ... {"type": "image"}, ... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."}, ... {"type": "image"}, ... {"type": "text", "text": "What can we see in this image?"}, ... ] ... }, ... { ... "role": "user", ... "content": [ ... {"type": "image"}, ... {"type": "text", "text": "In which city is that bridge located?"}, ... ] ... } ... ] >>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages] >>> images = [[image1, image2], [image3]] >>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device) >>> # Generate >>> generated_ids = model.generate(**inputs, max_new_tokens=256) >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True) >>> print(generated_texts[0]) Assistant: There are buildings, trees, lights, and water visible in this image. >>> print(generated_texts[1]) Assistant: The bridge is in San Francisco. ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) # 2. 
Merge text and images if pixel_values is not None and inputs_embeds.shape[1] != 1: if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_index, dtype=torch.long, device=inputs_embeds.device) ) n_image_tokens = (special_image_mask).sum(dim=1).sum(dim=0)[0] else: image_embeds = input_ids == self.config.image_token_index special_image_mask = image_embeds.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_tokens = (image_embeds).sum(dim=1).sum(dim=0) image_features = self.get_image_features( pixel_values=pixel_values, pixel_mask=pixel_mask, vision_feature_layer=self.config.vision_feature_layer, ) n_images, n_features_per_image = image_features.shape[0], image_features.shape[1] n_image_features = n_images * n_features_per_image if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, num_logits_to_keep=num_logits_to_keep, ) logits = outputs[0] loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **loss_kwargs ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return AriaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, num_logits_to_keep=None, **kwargs, ): model_inputs = self.language_model.prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, num_logits_to_keep=num_logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values"] = pixel_values model_inputs["pixel_mask"] = pixel_mask return model_inputs
class_definition
59,618
70,390
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/modeling_aria.py
null
7,212
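`_create_patch_attention_mask` in `AriaForConditionalGeneration` turns a per-pixel mask into a per-patch mask by unfolding the image grid into non-overlapping `patch_size x patch_size` tiles and keeping tiles that contain at least one real pixel. A toy version of that unfold trick, with made-up sizes:

```python
import torch

patch_size, image_size = 2, 6
# A pixel mask where only the top 4 rows contain real image content.
pixel_mask = torch.zeros(1, image_size, image_size, dtype=torch.bool)
pixel_mask[:, :4, :] = True

# Same steps as _create_patch_attention_mask: carve the mask into non-overlapping
# patch_size x patch_size tiles and keep tiles with any real pixel.
patches_subgrid = pixel_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

print(patch_attention_mask[0].int())
# tensor([[1, 1, 1],
#         [1, 1, 1],
#         [0, 0, 0]], dtype=torch.int32)
```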
class AriaImageProcessor(BaseImageProcessor): """ A vision processor for the Aria model that handles image preprocessing. Initialize the AriaImageProcessor. Args: image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]): Mean values for normalization. image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]): Standard deviation values for normalization. max_image_size (`int`, *optional*, defaults to 980): Maximum image size. min_image_size (`int`, *optional*, defaults to 336): Minimum image size. split_resolutions (`list`, *optional*, defaults to a list of optimal,resolutions as tuples): The optimal resolutions for splitting the image. split_image (`bool`, *optional*, defaults to `False`): Whether to split the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. resample (PILImageResampling, *optional*, defaults to `BICUBIC`): The resampling filter to use if resizing the image. """ def __init__( self, image_mean: List[float] = None, image_std: List[float] = None, max_image_size: int = 980, min_image_size: int = 336, split_resolutions: Optional[List[Tuple[int, int]]] = None, split_image: Optional[bool] = False, do_convert_rgb: Optional[bool] = True, do_normalize: Optional[bool] = True, resample: PILImageResampling = PILImageResampling.BICUBIC, **kwargs, ): super().__init__(**kwargs) if image_mean is None: image_mean = [0.5, 0.5, 0.5] if image_std is None: image_std = [0.5, 0.5, 0.5] self.max_image_size = max_image_size self.min_image_size = min_image_size self.image_mean = image_mean self.image_std = image_std self.split_image = split_image if split_resolutions is None: split_resolutions = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1), (7, 1), (8, 1)] # fmt: skip split_resolutions = [(el[0] * 490, el[1] * 490) for el in split_resolutions] self.split_resolutions = split_resolutions self.do_convert_rgb = do_convert_rgb self.do_normalize = do_normalize self.resample = resample def preprocess( self, images: Union[ImageInput, List[ImageInput]], image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, max_image_size: Optional[int] = None, min_image_size: Optional[int] = None, split_image: Optional[bool] = None, do_convert_rgb: Optional[bool] = None, do_normalize: Optional[bool] = None, resample: PILImageResampling = None, return_tensors: Optional[Union[str, TensorType]] = "pt", data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Process a list of images. Args: images (ImageInput or list of ImageInput): The input image or a list of images. image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]): Mean values for normalization. image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]): Standard deviation values for normalization. max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)): Maximum image size. min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)): Minimum image size. split_image (`bool`, *optional*, defaults to `self.split_image` (False)): Whether to split the image. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)): Whether to convert the image to RGB. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)): Whether to normalize the image. resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)): The resampling filter to use if resizing the image. return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"): The type of tensor to return. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: BatchFeature: A BatchFeature object containing: - 'pixel_values': Tensor of processed image pixel values. - 'pixel_mask': Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where: - True (1) values indicate pixels that belong to the original resized image. - False (0) values indicate pixels that are part of the padding. The mask helps distinguish between actual image content and padded areas in subsequent processing steps. - 'num_crops': The maximum number of crops across all images. """ image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std max_image_size = max_image_size if max_image_size is not None else self.max_image_size min_image_size = min_image_size if min_image_size is not None else self.min_image_size split_image = split_image if split_image is not None else self.split_image do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample if max_image_size not in [490, 980]: raise ValueError("max_image_size must be either 490 or 980") images = make_batched_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, resample=resample, ) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) pixel_values = [] pixel_masks = [] num_crops = None for image in images: if split_image: crop_images = self.get_image_patches( image, self.split_resolutions, max_image_size, resample, data_format=input_data_format, input_data_format=input_data_format, ) else: crop_images = [image] if num_crops is None or len(crop_images) > num_crops: num_crops = len(crop_images) for crop_image in crop_images: # At this point the scale is the rescaling factor that would bring the image to max_size in its larger dimension h, w = get_image_size(crop_image) scale = max_image_size / max(h, w) if w >= h: new_size = (max(int(h * scale), min_image_size), max_image_size) # h, w else: new_size = (max_image_size, max(int(w * scale), min_image_size)) # h, w crop_image_resized = resize( crop_image, new_size, resample=resample, data_format=input_data_format, input_data_format=input_data_format, ) padding_bottom, padding_right = max_image_size - new_size[0], max_image_size - new_size[1] crop_image_padded = pad( crop_image_resized, ((0, padding_bottom), (0, padding_right)), data_format=input_data_format, input_data_format=input_data_format, ) # Create a pixel mask pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool) pixel_mask[: new_size[0], : new_size[1]] = 1 pixel_masks.append(pixel_mask) if do_normalize: crop_image_padded = self.normalize( crop_image_padded / 255.0, self.image_mean, self.image_std, data_format=input_data_format, input_data_format=input_data_format, ) crop_image_padded = ( to_channel_dimension_format(crop_image_padded, data_format, input_data_format) if data_format is not None else crop_image_padded ) pixel_values.append(crop_image_padded) return BatchFeature( data={ "pixel_values": np.stack(pixel_values, axis=0), "pixel_mask": np.stack(pixel_masks, axis=0), "num_crops": num_crops, }, tensor_type=return_tensors, ) def _resize_for_patching( self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension ) -> np.array: """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: np.array: The resized and padded image. """ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format) # Resize the image resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format) return resized_image def _pad_for_patching( self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension ) -> np.array: """ Pad an image to a target resolution while maintaining aspect ratio. 
""" target_height, target_width = target_resolution new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format) paste_x = (target_width - new_width) // 2 paste_y = (target_height - new_height) // 2 padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x))) return padded_image def pad( self, image: np.ndarray, padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]], mode: PaddingMode = PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]] = 0.0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected as input. Args: image (`np.ndarray`): The image to pad. padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. 
""" # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim if isinstance(padding, int) or len(padding) != 4: return pad(image, padding, mode, constant_values, data_format, input_data_format) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) padding_mode_mapping = { PaddingMode.CONSTANT: "constant", PaddingMode.REFLECT: "reflect", PaddingMode.REPLICATE: "edge", PaddingMode.SYMMETRIC: "symmetric", } image = np.pad(image, padding, mode=padding_mode_mapping[mode], constant_values=constant_values) image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image def get_image_patches( self, image: np.array, grid_pinpoints: List[Tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension, ) -> List[np.array]: """ Process an image with variable resolutions by dividing it into patches. Args: image (`np.array`): The input image to be processed. grid_pinpoints (List[Tuple[int, int]]): A list of possible resolutions as tuples. patch_size (`int`): Size of the patches to divide the image into. resample (`PILImageResampling`): Resampling filter to use if resizing the image. data_format (`ChannelDimension` or `str`): The channel dimension format for the output image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: `List[np.array]`: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError("grid_pinpoints must be a list of possible resolutions.") possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=input_data_format) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching( image, best_resolution, resample=resample, input_data_format=input_data_format ) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format) patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format) # make sure that all patches are in the input data format patches = [ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches ] return patches
class_definition
4,320
22,897
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/image_processing_aria.py
null
7,213
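The per-crop logic in `AriaImageProcessor.preprocess` rescales the longer side of each crop to `max_image_size`, clamps the shorter side to at least `min_image_size`, pads bottom/right up to a square, and records a boolean pixel mask. The same arithmetic on toy numbers, without touching an actual image:

```python
import numpy as np

max_image_size, min_image_size = 980, 336
h, w = 400, 700  # height, width of one crop

scale = max_image_size / max(h, w)
if w >= h:
    new_size = (max(int(h * scale), min_image_size), max_image_size)  # (h, w)
else:
    new_size = (max_image_size, max(int(w * scale), min_image_size))

padding_bottom = max_image_size - new_size[0]
padding_right = max_image_size - new_size[1]
print(new_size, padding_bottom, padding_right)  # (560, 980) 420 0

# The pixel mask marks which pixels of the padded square belong to the resized crop.
pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool)
pixel_mask[: new_size[0], : new_size[1]] = 1
print(pixel_mask.sum())  # 548800 real pixels out of 960400
```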
class AriaProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "padding": False,
        },
        "images_kwargs": {
            "max_image_size": 980,
            "split_image": False,
        },
        "return_tensors": TensorType.PYTORCH,
    }
class_definition
1,809
2,109
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/processing_aria.py
null
7,214
class AriaProcessor(ProcessorMixin): """ AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the LLama slow tokenizer. Args: image_processor (`AriaImageProcessor`, *optional*): The AriaImageProcessor to use for image preprocessing. tokenizer (`PreTrainedTokenizerBase`, *optional*): An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. size_conversion (`Dict`, *optional*): A dictionary indicating size conversions for images. """ attributes = ["image_processor", "tokenizer"] valid_kwargs = ["chat_template", "size_conversion"] image_processor_class = "AriaImageProcessor" tokenizer_class = "AutoTokenizer" def __init__( self, image_processor=None, tokenizer: Union[AutoTokenizer, str] = None, chat_template: Optional[str] = None, size_conversion: Optional[Dict[Union[float, int], int]] = None, ): if size_conversion is None: size_conversion = {490: 128, 980: 256} self.size_conversion = {int(k): v for k, v in size_conversion.items()} if tokenizer is not None and tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.unk_token super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], images: Optional[ImageInput] = None, audio=None, videos=None, **kwargs: Unpack[AriaProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). Args: text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`ImageInput`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( AriaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError("Invalid input text. 
Please provide a string, or a list of strings") if images is not None: image_inputs = self.image_processor( images, **output_kwargs["images_kwargs"], ) # expand the image_token according to the num_crops and tokens per image tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]] prompt_strings = [] num_crops = image_inputs.pop("num_crops") * tokens_per_image for sample in text: sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops) prompt_strings.append(sample) else: image_inputs = {} prompt_strings = text text_inputs = self.tokenizer( prompt_strings, **output_kwargs["text_kwargs"], ) return BatchFeature(data={**text_inputs, **image_inputs}) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_definition
2,112
7,628
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/processing_aria.py
null
7,215
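A minimal usage sketch for the `AriaProcessor` record above. The checkpoint name (`rhymes-ai/Aria`, taken from the Aria config docstrings) and the image path are illustrative assumptions; the call itself follows the `__call__` signature shown in this record.

```python
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("rhymes-ai/Aria")  # assumed checkpoint
image = Image.open("example.jpg")                            # placeholder path, any RGB image

# The prompt must contain the tokenizer's image placeholder token so that
# __call__ can expand it to num_crops * tokens_per_image copies.
prompt = f"{processor.tokenizer.image_token} Describe this image."

inputs = processor(
    text=prompt,
    images=image,
    max_image_size=980,   # images_kwargs default from AriaProcessorKwargs
    split_image=False,
    return_tensors="pt",
)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_mask', 'pixel_values']
```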
class AriaTextConfig(PretrainedConfig): r""" This class handles the configuration for the text component of the Aria model. Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria [rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture. This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LlamaModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 4096): The size of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens, Llama 2 up to 4096, CodeLlama up to 16384. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 2): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. pretraining_tp (`int`, *optional*, defaults to 1): Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232). tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. 
NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. head_dim (`int`, *optional*): The attention head dimension. If None, it will default to hidden_size // num_heads moe_num_experts (`int`, *optional*, defaults to 8): The number of experts in the MoE layer. moe_topk (`int`, *optional*, defaults to 2): The number of top experts to route to for each token. moe_num_shared_experts (`int`, *optional*, defaults to 2): The number of shared experts. 
""" model_type = "aria_text" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `AriaTextModel` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_config_key = "text_config" def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size: int = 4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=2, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, moe_num_experts: int = 8, moe_topk: int = 2, moe_num_shared_experts: int = 2, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.mlp_bias = mlp_bias self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, copy it it to 'rope_type'. if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) self.moe_num_experts = moe_num_experts self.moe_topk = moe_topk self.moe_num_shared_experts = moe_num_shared_experts
class_definition
1,652
12,483
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/configuration_aria.py
null
7,216
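A short sketch instantiating the `AriaTextConfig` above with non-default MoE settings; the top-level `transformers` import is an assumption, and every keyword maps onto an `__init__` argument shown in this record.

```python
from transformers import AriaTextConfig  # assumes the class is re-exported at top level

config = AriaTextConfig(
    hidden_size=1024,
    intermediate_size=2048,
    num_hidden_layers=8,
    num_attention_heads=16,
    moe_num_experts=8,         # experts per MoE layer
    moe_topk=2,                # experts routed per token
    moe_num_shared_experts=2,  # always-active shared experts
)
print(config.head_dim)  # 64, derived as hidden_size // num_attention_heads when head_dim is None
```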
class AriaConfig(PretrainedConfig): r""" This class handles the configuration for both vision and text components of the Aria model, as well as additional parameters for image token handling and projector mapping. Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria [rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`AriaVisionConfig` or `dict`, *optional*): Configuration for the vision component. vision_feature_layer (`int`, *optional*, defaults to -1): The index of the layer to select the vision feature. text_config (`AriaTextConfig` or `dict`, *optional*): Configuration for the text component. projector_patch_to_query_dict (`dict`, *optional*): Mapping of patch sizes to query dimensions. image_token_index (`int`, *optional*, defaults to 9): Index used to represent image tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal initializer for initializing all weight matrices. Attributes: model_type (`str`): Type of the model, set to `"aria"`. image_token_index (`int`): Index used to represent image tokens. projector_patch_to_query_dict (`dict`): Mapping of patch sizes to query dimensions. vision_config (`AriaVisionConfig`): Configuration for the vision component. text_config (`AriaTextConfig`): Configuration for the text component. """ model_type = "aria" sub_configs = {"text_config": AriaTextConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, vision_feature_layer: int = -1, text_config: AriaTextConfig = None, projector_patch_to_query_dict: Dict = None, image_token_index: int = 9, initializer_range: float = 0.02, **kwargs, ): self.image_token_index = image_token_index # Convert the keys and values of projector_patch_to_query_dict to integers # This ensures consistency even if they were provided as strings if projector_patch_to_query_dict is None: projector_patch_to_query_dict = { 1225: 128, 4900: 256, } self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()} self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values()) self.vision_feature_layer = vision_feature_layer if isinstance(vision_config, dict): vision_config["model_type"] = "idefics3_vision" vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["idefics3_vision"]() self.vision_config = vision_config self.initializer_range = initializer_range if isinstance(text_config, dict) and "model_type" in text_config: text_config = AriaTextConfig(**text_config) elif text_config is None: text_config = AriaTextConfig() self.text_config = text_config super().__init__(**kwargs)
class_definition
12,486
16,059
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/aria/configuration_aria.py
null
7,217
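A sketch composing the `AriaConfig` above from explicit sub-configs. It only exercises behaviour visible in `__init__`: string keys of `projector_patch_to_query_dict` are cast to `int`, and a missing `vision_config` falls back to the `idefics3_vision` default.

```python
from transformers import AriaConfig, AriaTextConfig  # assumes top-level re-exports

text_config = AriaTextConfig(hidden_size=1024, num_hidden_layers=8)
config = AriaConfig(
    text_config=text_config,
    vision_config=None,  # falls back to CONFIG_MAPPING["idefics3_vision"]()
    projector_patch_to_query_dict={"1225": 128, "4900": 256},  # string keys are cast to int
    image_token_index=9,
)
print(config.max_value_projector_patch_to_query_dict)  # 256
print(type(config.vision_config).__name__)             # Idefics3VisionConfig by default
```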
class OwlViTProcessor(ProcessorMixin): r""" Constructs an OWL-ViT processor which wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that interits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`OwlViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "OwlViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs): """ Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)): encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)] elif isinstance(text, List) and isinstance(text[0], List): encodings = [] # Maximum number of queries across batch max_num_queries = max([len(t) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(t) != max_num_queries: t = t + [" "] * (max_num_queries - len(t)) encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs) encodings.append(encoding) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "pt" and is_torch_available(): import torch input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0) attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0) else: raise ValueError("Target return tensor type could not be returned") encoding = BatchEncoding() encoding["input_ids"] = input_ids encoding["attention_mask"] = attention_mask if query_images is not None: encoding = BatchEncoding() query_pixel_values = self.image_processor( query_images, return_tensors=return_tensors, **kwargs ).pixel_values encoding["query_pixel_values"] = query_pixel_values if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif query_images is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def post_process(self, *args, **kwargs): """ This method forwards all its arguments to [`OwlViTImageProcessor.post_process`]. Please refer to the docstring of this method for more information. """ return self.image_processor.post_process(*args, **kwargs) def post_process_object_detection(self, *args, **kwargs): """ This method forwards all its arguments to [`OwlViTImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. 
""" warnings.warn( "`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. " "Use `post_process_grounded_object_detection` instead.", FutureWarning, ) return self.image_processor.post_process_object_detection(*args, **kwargs) def post_process_grounded_object_detection( self, outputs: "OwlViTObjectDetectionOutput", threshold: float = 0.1, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, text_labels: Optional[List[List[str]]] = None, ): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`List[List[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. """ output = self.image_processor.post_process_object_detection( outputs=outputs, threshold=threshold, target_sizes=target_sizes ) if text_labels is not None and len(text_labels) != len(output): raise ValueError("Make sure that you pass in as many lists of text labels as images") # adding text labels to the output if text_labels is not None: for image_output, image_text_labels in zip(output, text_labels): object_text_labels = [image_text_labels[i] for i in image_output["labels"]] image_output["text_labels"] = object_text_labels else: for image_output in output: image_output["text_labels"] = None return output def post_process_image_guided_detection( self, outputs: "OwlViTImageGuidedObjectDetectionOutput", threshold: float = 0.0, nms_threshold: float = 0.3, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, ): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. 
""" return self.image_processor.post_process_image_guided_detection( outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes ) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
class_definition
1,064
15,276
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/processing_owlvit.py
null
7,218
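A usage sketch for `OwlViTProcessor.__call__` with a nested list of text queries for one image, which exercises the padding-to-max-queries branch shown above. The COCO image URL is a placeholder assumption.

```python
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # placeholder example image
image = Image.open(requests.get(url, stream=True).raw)

# A nested list means several text queries for one image; shorter query lists
# would be padded with " " up to the longest list, as in __call__ above.
texts = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(text=texts, images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```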
class OwlViTTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OwlViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OwlViTTextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. 
Example: ```python >>> from transformers import OwlViTTextConfig, OwlViTTextModel >>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTTextConfig() >>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration >>> model = OwlViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlvit_text_model" base_config_key = "text_config" def __init__( self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class_definition
1,022
5,737
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/configuration_owlvit.py
null
7,219
class OwlViTVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel >>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTVisionConfig() >>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration >>> model = OwlViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlvit_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class_definition
5,740
9,782
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/configuration_owlvit.py
null
7,220
class OwlViTConfig(PretrainedConfig): r""" [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. """ model_type = "owlvit" sub_configs = {"text_config": OwlViTTextConfig, "vision_config": OwlViTVisionConfig} def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.") self.text_config = OwlViTTextConfig(**text_config) self.vision_config = OwlViTVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.return_dict = return_dict self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs): r""" Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision model configuration. Returns: [`OwlViTConfig`]: An instance of a configuration object """ config_dict = {} config_dict["text_config"] = text_config config_dict["vision_config"] = vision_config return cls.from_dict(config_dict, **kwargs)
class_definition
9,785
12,848
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/configuration_owlvit.py
null
7,221
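A sketch of `OwlViTConfig.from_text_vision_configs`, which expects plain dicts and routes them through `from_dict`; calling `to_dict()` on the sub-configs is standard `PretrainedConfig` behaviour.

```python
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_config = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)

config = OwlViTConfig.from_text_vision_configs(
    text_config=text_config.to_dict(),
    vision_config=vision_config.to_dict(),
    projection_dim=512,
)
print(config.logit_scale_init_value)  # 2.6592 by default
```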
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
class_definition
12,851
14,322
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/configuration_owlvit.py
null
7,222
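A sketch driving `OwlViTOnnxConfig.generate_dummy_inputs` with the processor defined earlier; the constructor call assumes the base `OnnxConfig(config, ...)` signature and the default (framework-agnostic) dummy-input path.

```python
from transformers import OwlViTConfig, OwlViTProcessor
from transformers.models.owlvit.configuration_owlvit import OwlViTOnnxConfig

onnx_config = OwlViTOnnxConfig(OwlViTConfig())  # assumes the base OnnxConfig(config, ...) signature
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")

dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=16)
print(sorted(dummy))                   # ['attention_mask', 'input_ids', 'pixel_values']
print(onnx_config.default_onnx_opset)  # 14
```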
class OwlViTOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`OwlViTVisionModel`]. text_model_output (Tuple[`BaseModelOutputWithPooling`]): The output of the [`OwlViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`OwlViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
2,202
4,116
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,223
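The `logits_per_image` / `logits_per_text` fields documented above are CLIP-style scaled dot products between normalised embeddings; the standalone arithmetic below is a sketch of that relationship, not the model's exact forward code.

```python
import torch

text_embeds = torch.nn.functional.normalize(torch.randn(4, 512), dim=-1)   # 4 text queries
image_embeds = torch.nn.functional.normalize(torch.randn(2, 512), dim=-1)  # 2 images
logit_scale = torch.tensor(2.6592).exp()  # exp of OwlViTConfig.logit_scale_init_value

logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale  # (text_batch, image_batch)
logits_per_image = logits_per_text.t()                                       # (image_batch, text_batch)
print(logits_per_image.shape)  # torch.Size([2, 4])
```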
class OwlViTObjectDetectionOutput(ModelOutput): """ Output type of [`OwlViTForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes image embeddings for each patch. class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. text_model_output (Tuple[`BaseModelOutputWithPooling`]): The output of the [`OwlViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`OwlViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None class_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
6,770
9,622
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,224
class OwlViTImageGuidedObjectDetectionOutput(ModelOutput): """ Output type of [`OwlViTForObjectDetection.image_guided_detection`]. Args: logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`): Classification logits (including no-object) for all queries. target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual target image in the batch (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual query image in the batch (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes image embeddings for each patch. query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes image embeddings for each patch. class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. text_model_output (Tuple[`BaseModelOutputWithPooling`]): The output of the [`OwlViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`OwlViTVisionModel`]. """ logits: torch.FloatTensor = None image_embeds: torch.FloatTensor = None query_image_embeds: torch.FloatTensor = None target_pred_boxes: torch.FloatTensor = None query_pred_boxes: torch.FloatTensor = None class_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
9,636
12,563
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,225
class OwlViTVisionEmbeddings(nn.Module): def __init__(self, config: OwlViTVisionConfig): super().__init__() self.patch_size = config.patch_size self.config = config self.embed_dim = config.hidden_size self.class_embedding = nn.Parameter(torch.randn(config.hidden_size)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=config.patch_size, stride=config.patch_size, bias=False, ) self.num_patches = (config.image_size // config.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) # shape = [batch_size, num_channels, height, width] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class_definition
12,566
16,146
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,226
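A standalone sketch of the position-embedding interpolation arithmetic used in `interpolate_pos_encoding` above, with the default `image_size=768`, `patch_size=32` and an assumed 960x640 input.

```python
import torch

patch_size, pretrain_size, new_h, new_w, dim = 32, 768, 960, 640, 768
num_positions = (pretrain_size // patch_size) ** 2  # 576 patch positions (24 x 24 grid)
grid = int(num_positions ** 0.5)

patch_pos_embed = torch.randn(1, num_positions, dim)
patch_pos_embed = patch_pos_embed.reshape(1, grid, grid, dim).permute(0, 3, 1, 2)
patch_pos_embed = torch.nn.functional.interpolate(
    patch_pos_embed,
    size=(new_h // patch_size, new_w // patch_size),  # (30, 20) for a 960x640 image
    mode="bicubic",
    align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
print(patch_pos_embed.shape)  # torch.Size([1, 600, 768]) -- one embedding per new patch
```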
class OwlViTTextEmbeddings(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings
class_definition
16,149
17,355
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,227
class OwlViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # For int8 compatibility, sometimes the `attn_probs` are in `fp32` attn_probs = attn_probs.to(value_states.dtype) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped
class_definition
17,358
22,252
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,228
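A shape check for `OwlViTAttention`: input and output share `(batch, seq_len, embed_dim)`, and with `output_attentions=True` the weights come back as `(batch, num_heads, seq_len, seq_len)`. Instantiating the module from a vision config is an assumption for the sketch; only `hidden_size`, `num_attention_heads` and `attention_dropout` are read.

```python
import torch
from transformers.models.owlvit.configuration_owlvit import OwlViTVisionConfig
from transformers.models.owlvit.modeling_owlvit import OwlViTAttention

config = OwlViTVisionConfig(hidden_size=768, num_attention_heads=12)
attn = OwlViTAttention(config)

hidden = torch.randn(2, 50, 768)
out, weights = attn(hidden, output_attentions=True)
print(out.shape)      # torch.Size([2, 50, 768])
print(weights.shape)  # torch.Size([2, 12, 50, 50])
```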
class OwlViTMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class_definition
22,334
22,906
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,229
class OwlViTEncoderLayer(nn.Module): def __init__(self, config: OwlViTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = OwlViTAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = OwlViTMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
23,009
24,962
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,230
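A minimal call of the pre-norm `OwlViTEncoderLayer`: LayerNorm then attention plus residual, LayerNorm then MLP plus residual. Vision inputs pass `None` for both masks; using a vision config here is an assumption for the sketch.

```python
import torch
from transformers.models.owlvit.configuration_owlvit import OwlViTVisionConfig
from transformers.models.owlvit.modeling_owlvit import OwlViTEncoderLayer

config = OwlViTVisionConfig()
layer = OwlViTEncoderLayer(config)

hidden = torch.randn(1, 50, config.hidden_size)
(output,) = layer(hidden, attention_mask=None, causal_attention_mask=None)
print(output.shape)  # torch.Size([1, 50, 768])
```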
class OwlViTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = OwlViTConfig base_model_prefix = "owlvit" supports_gradient_checkpointing = True _no_split_modules = ["OwlViTEncoderLayer"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, OwlViTTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, OwlViTVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, OwlViTAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, OwlViTMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, OwlViTModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
class_definition
24,965
27,656
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,231
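`_init_weights` scales the standard deviation of the attention and MLP projections by both the hidden size and the network depth (the `(2 * num_hidden_layers) ** -0.5` term), a GPT-2/CLIP-style initialization. A small sketch of that computation with made-up config values:

import torch.nn as nn

hidden_size, num_hidden_layers, factor = 512, 12, 1.0  # hypothetical config values
in_proj_std = (hidden_size**-0.5) * ((2 * num_hidden_layers) ** -0.5) * factor
out_proj_std = (hidden_size**-0.5) * factor

q_proj = nn.Linear(hidden_size, hidden_size)
out_proj = nn.Linear(hidden_size, hidden_size)
nn.init.normal_(q_proj.weight, std=in_proj_std)     # std ~= 0.009, shrinks with depth
nn.init.normal_(out_proj.weight, std=out_proj_std)  # std ~= 0.044, depends only on width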
class OwlViTEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`OwlViTEncoderLayer`].

    Args:
        config: OwlViTConfig
    """

    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([OwlViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class_definition
34,907
38,950
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,232
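When `output_hidden_states=True`, the loop above stores the input embeddings first and then the output of every layer, so the returned tuple has `num_hidden_layers + 1` entries. A toy loop demonstrating that accumulation pattern, with `nn.Linear` layers standing in for encoder layers:

import torch
import torch.nn as nn

layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])  # stand-ins for 4 encoder layers
hidden_states = torch.randn(1, 3, 8)

encoder_states = (hidden_states,)  # the embedding output comes first
for layer in layers:
    hidden_states = layer(hidden_states)
    encoder_states = encoder_states + (hidden_states,)

assert len(encoder_states) == len(layers) + 1  # num_hidden_layers + 1 entries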
class OwlViTTextTransformer(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = OwlViTTextEmbeddings(config)
        self.encoder = OwlViTEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTTextConfig)
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # num_samples, seq_len = input_shape  where num_samples = batch_size * num_max_text_queries
        # OWLVIT's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask
        if attention_mask is not None:
            # [num_samples, seq_len] -> [num_samples, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take features from the end of tokens embedding (end of token is the highest number in each sequence)
        # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class_definition
38,953
42,364
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,233
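The pooled text embedding is taken at the position of the end-of-sequence token, which the code locates as the largest token id in each row (a CLIP tokenizer convention). A small, self-contained sketch of that gather on random tensors; the EOS id used here is only an assumption for illustration:

import torch

batch, seq_len, hidden = 2, 8, 16
last_hidden_state = torch.randn(batch, seq_len, hidden)
input_ids = torch.randint(1, 100, (batch, seq_len))
input_ids[:, -2] = 49407  # pretend EOS id, assumed to be the largest id in each sequence

eos_positions = input_ids.to(torch.int).argmax(dim=-1)          # (batch,)
pooled = last_hidden_state[torch.arange(batch), eos_positions]  # (batch, hidden)
print(pooled.shape)  # torch.Size([2, 16])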
class OwlViTTextModel(OwlViTPreTrainedModel):
    config_class = OwlViTTextConfig

    def __init__(self, config: OwlViTTextConfig):
        super().__init__(config)
        self.text_model = OwlViTTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTTextConfig)
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTTextModel

        >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Get embeddings for all text queries in all batch samples
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
class_definition
42,367
44,349
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,234
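The processor flattens the nested text queries, so `input_ids` arrives with shape `(batch_size * num_max_text_queries, seq_len)` and the pooled outputs are regrouped per image further downstream. A shape-only sketch of that round trip with arbitrary values:

import torch

batch_size, max_text_queries, seq_len, hidden = 2, 3, 16, 512
input_ids = torch.randint(1, 100, (batch_size * max_text_queries, seq_len))  # flattened queries
pooled = torch.randn(batch_size * max_text_queries, hidden)

queries_per_image = input_ids.shape[0] // batch_size               # recovers max_text_queries
pooled_per_image = pooled.reshape(batch_size, queries_per_image, hidden)
print(pooled_per_image.shape)  # torch.Size([2, 3, 512])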
class OwlViTVisionTransformer(nn.Module):
    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.config = config

        self.embeddings = OwlViTVisionEmbeddings(config)
        self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.encoder = OwlViTEncoder(config)
        self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTVisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Cast the input to the expected `dtype`
        expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
        pixel_values = pixel_values.to(expected_input_dtype)

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layernorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class_definition
44,352
46,791
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,235
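The vision tower operates on a fixed grid of patches plus one class token, so the sequence length seen by the encoder is `(image_size // patch_size) ** 2 + 1`. For the assumed defaults of the base patch-32 checkpoint (768-pixel input, 32-pixel patches) that gives:

image_size, patch_size = 768, 32             # assumed defaults for owlvit-base-patch32
num_patches = (image_size // patch_size) ** 2
seq_len = num_patches + 1                    # +1 for the class token pooled above
print(num_patches, seq_len)                  # 576 577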
class OwlViTVisionModel(OwlViTPreTrainedModel):
    config_class = OwlViTVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: OwlViTVisionConfig):
        super().__init__(config)
        self.vision_model = OwlViTVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTVisionModel

        >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )
class_definition
46,794
48,815
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,236
class OwlViTModel(OwlViTPreTrainedModel):
    config_class = OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)

        if not isinstance(config.text_config, OwlViTTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type OwlViTTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, OwlViTVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = OwlViTTextTransformer(text_config)
        self.vision_model = OwlViTVisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTTextModel`].

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Get embeddings for all text queries in all batch samples
        text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict)
        pooled_output = text_output[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTVisionModel`].

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(OWLVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=OwlViTOutput, config_class=OwlViTConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_base_image_embeds: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, OwlViTOutput]:
        r"""
        Returns:

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        # Get embeddings for all text queries in all batch samples
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        # normalized features
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)

        # cosine similarity as logits and set it on the correct device
        logit_scale = self.logit_scale.exp().to(image_embeds.device)

        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = owlvit_loss(logits_per_text)

        text_embeds = text_embeds_norm

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return OwlViTOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
class_definition
48,864
58,114
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,237
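The forward pass above is the standard CLIP-style contrastive head: both embeddings are L2-normalized, their dot products are scaled by the exponentiated learnable temperature, and `logits_per_image` is simply the transpose of `logits_per_text`. A minimal numeric sketch with random embeddings (the 512-dim projection size and the 2.6592 init value are assumptions for illustration):

import torch

text_embeds = torch.randn(2, 512)    # 2 text queries
image_embeds = torch.randn(3, 512)   # 3 images

text_embeds = text_embeds / torch.linalg.norm(text_embeds, dim=-1, keepdim=True)
image_embeds = image_embeds / torch.linalg.norm(image_embeds, dim=-1, keepdim=True)

logit_scale = torch.tensor(2.6592).exp()                         # exp of an assumed init value
logits_per_text = logit_scale * text_embeds @ image_embeds.t()   # (2, 3)
logits_per_image = logits_per_text.t()                           # (3, 2)
print(logits_per_image.softmax(dim=1))                           # per-image probabilities over queries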
class OwlViTBoxPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig, out_dim: int = 4):
        super().__init__()

        width = config.vision_config.hidden_size
        self.dense0 = nn.Linear(width, width)
        self.dense1 = nn.Linear(width, width)
        self.gelu = nn.GELU()
        self.dense2 = nn.Linear(width, out_dim)

    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        output = self.dense0(image_features)
        output = self.gelu(output)
        output = self.dense1(output)
        output = self.gelu(output)
        output = self.dense2(output)
        return output
class_definition
58,117
58,755
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,238
class OwlViTClassPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig):
        super().__init__()

        out_dim = config.text_config.hidden_size
        self.query_dim = config.vision_config.hidden_size

        self.dense0 = nn.Linear(self.query_dim, out_dim)
        self.logit_shift = nn.Linear(self.query_dim, 1)
        self.logit_scale = nn.Linear(self.query_dim, 1)
        self.elu = nn.ELU()

    def forward(
        self,
        image_embeds: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor],
        query_mask: Optional[torch.Tensor],
    ) -> Tuple[torch.FloatTensor]:
        image_class_embeds = self.dense0(image_embeds)
        if query_embeds is None:
            device = image_class_embeds.device
            batch_size, num_patches = image_class_embeds.shape[:2]
            pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
            return (pred_logits, image_class_embeds)

        # Normalize image and text features
        image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
        query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)

        # Get class predictions
        pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)

        # Apply a learnable shift and scale to logits
        logit_shift = self.logit_shift(image_embeds)
        logit_scale = self.logit_scale(image_embeds)
        logit_scale = self.elu(logit_scale) + 1
        pred_logits = (pred_logits + logit_shift) * logit_scale

        if query_mask is not None:
            if query_mask.ndim > 1:
                query_mask = torch.unsqueeze(query_mask, dim=-2)

            pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits)
            pred_logits = pred_logits.to(torch.float32)

        return (pred_logits, image_class_embeds)
class_definition
58,758
60,740
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,239
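The class head turns patch/query cosine similarities into detection logits by adding a per-patch learned shift and multiplying by a per-patch scale kept positive via `ELU(x) + 1`. The sketch below isolates just that transformation on random tensors; the shapes (a 24x24 patch grid, 2 text queries) are assumptions:

import torch
import torch.nn.functional as F

similarities = torch.randn(1, 576, 2)   # (batch, num_patches, num_queries) cosine similarities
logit_shift = torch.randn(1, 576, 1)    # produced by Linear(hidden, 1) layers in the real head
logit_scale = torch.randn(1, 576, 1)

pred_logits = (similarities + logit_shift) * (F.elu(logit_scale) + 1)
per_query_probs = torch.sigmoid(pred_logits)   # independent sigmoid per query, as in post-processing
print(per_query_probs.shape)  # torch.Size([1, 576, 2])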
class OwlViTForObjectDetection(OwlViTPreTrainedModel): config_class = OwlViTConfig def __init__(self, config: OwlViTConfig): super().__init__(config) self.owlvit = OwlViTModel(config) self.class_head = OwlViTClassPredictionHead(config) self.box_head = OwlViTBoxPredictionHead(config) self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps) self.sigmoid = nn.Sigmoid() self.config = config self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width) @staticmethod def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor: # Create grid coordinates using torch x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32) y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32) xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing="xy") # Stack the coordinates and divide by their respective patch counts box_coordinates = torch.stack((xx, yy), dim=-1) box_coordinates[..., 0] /= num_patches_width box_coordinates[..., 1] /= num_patches_height # Flatten (h, w, 2) -> (h*w, 2) box_coordinates = box_coordinates.view(-1, 2) return box_coordinates @lru_cache(maxsize=2) def compute_box_bias( self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor] = None ) -> torch.Tensor: if feature_map is not None: raise ValueError("feature_map has been deprecated as an input. Please pass in num_patches instead") # The box center is biased to its position on the feature grid box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width) box_coordinates = torch.clip(box_coordinates, 0.0, 1.0) # Unnormalize xy box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4) # The box size is biased to the patch size box_size = torch.full_like(box_coord_bias, 1.0) box_size[..., 0] /= num_patches_width box_size[..., 1] /= num_patches_height box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4) # Compute box bias box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1) return box_bias def box_predictor( self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: """ Args: image_feats: Features extracted from the image, returned by the `image_text_embedder` method. feature_map: A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method. interpolate_pos_encoding: Whether to interpolate the pre-trained position encodings. Returns: pred_boxes: List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary. """ # Bounding box detection head [batch_size, num_boxes, 4]. 
pred_boxes = self.box_head(image_feats) # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction if interpolate_pos_encoding: _, num_patches_height, num_patches_width, _ = feature_map.shape box_bias = self.compute_box_bias(num_patches_height, num_patches_width) else: box_bias = self.box_bias box_bias = box_bias.to(feature_map.device) pred_boxes += box_bias pred_boxes = self.sigmoid(pred_boxes) return pred_boxes def class_predictor( self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor] = None, query_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.FloatTensor]: """ Args: image_feats: Features extracted from the `image_text_embedder`. query_embeds: Text query embeddings. query_mask: Must be provided with query_embeddings. A mask indicating which query embeddings are valid. """ (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask) return (pred_logits, image_class_embeds) def image_text_embedder( self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Tuple[torch.FloatTensor]: # Encode text and image outputs = self.owlvit( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) if interpolate_pos_encoding: _, _, height, width = pixel_values.shape num_patches_height = height // self.config.vision_config.patch_size num_patches_width = width // self.config.vision_config.patch_size else: num_patches_height = self.num_patches_height num_patches_width = self.num_patches_width # Get image embeddings last_hidden_state = outputs.vision_model_output[0] image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state) # Resize class token class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape) # Merge image embedding with class tokens image_embeds = image_embeds[:, 1:, :] * class_token_out image_embeds = self.layer_norm(image_embeds) # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size] new_size = ( image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1], ) image_embeds = image_embeds.reshape(new_size) text_embeds = outputs[-4] return (text_embeds, image_embeds, outputs) def image_embedder( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Tuple[torch.FloatTensor]: # Get OwlViTModel vision embeddings (same as CLIP) vision_outputs = self.owlvit.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True ) if interpolate_pos_encoding: _, _, height, width = pixel_values.shape num_patches_height = height // self.config.vision_config.patch_size num_patches_width = width // self.config.vision_config.patch_size else: num_patches_height = self.num_patches_height num_patches_width = self.num_patches_width # Apply post_layernorm to last_hidden_state, return non-projected output last_hidden_state = vision_outputs[0] image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state) # Resize class token class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape) # Merge 
image embedding with class tokens image_embeds = image_embeds[:, 1:, :] * class_token_out image_embeds = self.layer_norm(image_embeds) # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size] new_size = ( image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1], ) image_embeds = image_embeds.reshape(new_size) return (image_embeds, vision_outputs) def embed_image_query( self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: _, class_embeds = self.class_predictor(query_image_features) pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding) pred_boxes_as_corners = center_to_corners_format(pred_boxes) # Loop over query images best_class_embeds = [] best_box_indices = [] pred_boxes_device = pred_boxes_as_corners.device for i in range(query_image_features.shape[0]): each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device) each_query_pred_boxes = pred_boxes_as_corners[i] ious, _ = box_iou(each_query_box, each_query_pred_boxes) # If there are no overlapping boxes, fall back to generalized IoU if torch.all(ious[0] == 0.0): ious = generalized_box_iou(each_query_box, each_query_pred_boxes) # Use an adaptive threshold to include all boxes within 80% of the best IoU iou_threshold = torch.max(ious) * 0.8 selected_inds = (ious[0] >= iou_threshold).nonzero() if selected_inds.numel(): selected_embeddings = class_embeds[i][selected_inds.squeeze(1)] mean_embeds = torch.mean(class_embeds[i], axis=0) mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings) best_box_ind = selected_inds[torch.argmin(mean_sim)] best_class_embeds.append(class_embeds[i][best_box_ind]) best_box_indices.append(best_box_ind) if best_class_embeds: query_embeds = torch.stack(best_class_embeds) box_indices = torch.stack(best_box_indices) else: query_embeds, box_indices = None, None return query_embeds, box_indices, pred_boxes @add_start_docstrings_to_model_forward(OWLVIT_IMAGE_GUIDED_OBJECT_DETECTION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OwlViTImageGuidedObjectDetectionOutput, config_class=OwlViTConfig) def image_guided_detection( self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> OwlViTImageGuidedObjectDetectionOutput: r""" Returns: Examples: ```python >>> import requests >>> from PIL import Image >>> import torch >>> from transformers import AutoProcessor, OwlViTForObjectDetection >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16") >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg" >>> query_image = Image.open(requests.get(query_url, stream=True).raw) >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model.image_guided_detection(**inputs) >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2] >>> target_sizes = torch.Tensor([image.size[::-1]]) >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> results = processor.post_process_image_guided_detection( ... outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes ... ) >>> i = 0 # Retrieve predictions for the first image >>> boxes, scores = results[i]["boxes"], results[i]["scores"] >>> for box, score in zip(boxes, scores): ... box = [round(i, 2) for i in box.tolist()] ... print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}") Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39] Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # Compute feature maps for the input and query images query_feature_map = self.image_embedder( pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding )[0] feature_map, vision_outputs = self.image_embedder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim)) batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape query_image_feats = torch.reshape( query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim) ) # Get top class embedding and best box index for each query image in batch query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query( query_image_feats, query_feature_map, interpolate_pos_encoding ) # Predict object classes [batch_size, num_patches, num_queries+1] (pred_logits, class_embeds) = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds) # Predict object boxes target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding) if not return_dict: output = ( feature_map, query_feature_map, target_pred_boxes, query_pred_boxes, pred_logits, class_embeds, vision_outputs.to_tuple(), ) output = tuple(x for x in output if x is not None) return output return OwlViTImageGuidedObjectDetectionOutput( image_embeds=feature_map, query_image_embeds=query_feature_map, target_pred_boxes=target_pred_boxes, query_pred_boxes=query_pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=None, vision_model_output=vision_outputs, ) @add_start_docstrings_to_model_forward(OWLVIT_OBJECT_DETECTION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OwlViTObjectDetectionOutput, config_class=OwlViTConfig) def forward( self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> OwlViTObjectDetectionOutput: r""" Returns: Examples: 
```python >>> import requests >>> from PIL import Image >>> import torch >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32") >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text_labels = [["a photo of a cat", "a photo of a dog"]] >>> inputs = processor(text=text_labels, images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2] >>> target_sizes = torch.tensor([(image.height, image.width)]) >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> results = processor.post_process_grounded_object_detection( ... outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels ... ) >>> # Retrieve predictions for the first image for the corresponding text queries >>> result = results[0] >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"] >>> for box, score, text_label in zip(boxes, scores, text_labels): ... box = [round(i, 2) for i in box.tolist()] ... print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}") Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29] Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # Embed images and text queries query_embeds, feature_map, outputs = self.image_text_embedder( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) # Text and vision model outputs text_outputs = outputs.text_model_output vision_outputs = outputs.vision_model_output batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim)) # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim] max_text_queries = input_ids.shape[0] // batch_size query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1]) # If first token is 0, then this is a padded query [batch_size, num_queries]. 
input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1]) query_mask = input_ids[..., 0] > 0 # Predict object classes [batch_size, num_patches, num_queries+1] (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds, query_mask) # Predict object boxes pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding) if not return_dict: output = ( pred_logits, pred_boxes, query_embeds, feature_map, class_embeds, text_outputs.to_tuple(), vision_outputs.to_tuple(), ) output = tuple(x for x in output if x is not None) return output return OwlViTObjectDetectionOutput( image_embeds=feature_map, text_embeds=query_embeds, pred_boxes=pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
class_definition
60,743
81,451
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/modeling_owlvit.py
null
7,240
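The box head predicts offsets in logit space that are biased toward each patch's position and size before the final sigmoid. The standalone function below is a condensed re-derivation of that inverse-sigmoid construction under the same grid layout; it is a sketch for intuition, not the `compute_box_bias` method itself.

import torch

def grid_box_bias(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
    # Normalized (x, y) centers of every patch, flattened to (h*w, 2)
    xs = torch.arange(1, num_patches_width + 1, dtype=torch.float32) / num_patches_width
    ys = torch.arange(1, num_patches_height + 1, dtype=torch.float32) / num_patches_height
    xx, yy = torch.meshgrid(xs, ys, indexing="xy")
    centers = torch.stack((xx, yy), dim=-1).view(-1, 2).clamp(0.0, 1.0)

    # Inverse sigmoid ("logit") of the centers biases each predicted box center toward its patch
    coord_bias = torch.log(centers + 1e-4) - torch.log1p(-centers + 1e-4)

    # The size bias pushes the predicted width/height toward one patch
    size = torch.full_like(coord_bias, 1.0)
    size[..., 0] /= num_patches_width
    size[..., 1] /= num_patches_height
    size_bias = torch.log(size + 1e-4) - torch.log1p(-size + 1e-4)

    return torch.cat([coord_bias, size_bias], dim=-1)  # (h*w, 4), added to the raw boxes pre-sigmoid

print(grid_box_bias(24, 24).shape)  # torch.Size([576, 4])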
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
class_definition
815
1,185
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/feature_extraction_owlvit.py
null
7,241
class OwlViTImageProcessor(BaseImageProcessor): r""" Constructs an OWL-ViT image processor. This image processor inherits from [`ImageProcessingMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shorter edge of the input to a certain `size`. size (`Dict[str, int]`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized to (size, size). resample (`int`, *optional*, defaults to `Resampling.BICUBIC`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`int`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for center cropping the image. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input by a certain factor. rescale_factor (`float`, *optional*, defaults to `1/255`): The factor to use for rescaling the image. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with `image_mean` and `image_std`. Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. image_mean (`List[int]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): The sequence of means for each channel, to be used when normalizing images. image_std (`List[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): The sequence of standard deviations for each channel, to be used when normalizing images. """ model_input_names = ["pixel_values"] def __init__( self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=False, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ): size = size if size is not None else {"height": 768, "width": 768} size = get_size_dict(size, default_to_square=True) crop_size = crop_size if crop_size is not None else {"height": 768, "width": 768} crop_size = get_size_dict(crop_size, default_to_square=True) # Early versions of the OWL-ViT config on the hub had "rescale" as a flag. This clashes with the # vision image processor method `rescale` as it would be set as an attribute during the super().__init__ # call. This is for backwards compatibility. 
if "rescale" in kwargs: rescale_val = kwargs.pop("rescale") kwargs["do_rescale"] = rescale_val super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to a certain size. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): The size to resize the image to. Must contain height and width keys. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use when resizing the input. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=True) if "height" not in size or "width" not in size: raise ValueError("size dictionary must contain height and width keys") return resize( image, (size["height"], size["width"]), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def center_crop( self, image: np.ndarray, crop_size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to a certain size. Args: image (`np.ndarray`): Image to center crop. crop_size (`Dict[str, int]`): The size to center crop the image to. Must contain height and width keys. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ crop_size = get_size_dict(crop_size, default_to_square=True) if "height" not in crop_size or "width" not in crop_size: raise ValueError("crop_size dictionary must contain height and width keys") return center_crop( image, (crop_size["height"], crop_size["width"]), data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Prepares an image or batch of images for the model. Args: images (`ImageInput`): The image or batch of images to be prepared. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`. size (`Dict[str, int]`, *optional*, defaults to `self.size`): The size to resize the input to. Only has an effect if `do_resize` is set to `True`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether or not to center crop the input. If `True`, will center crop the input to the size specified by `crop_size`. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether or not to rescale the input. If `True`, will rescale the input by dividing it by `rescale_factor`. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean` and dividing by `image_std`. image_mean (`Union[float, List[float]]`, *optional*, defaults to `self.image_mean`): The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to `True`. image_std (`Union[float, List[float]]`, *optional*, defaults to `self.image_std`): The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. 
Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ # TODO: (amy) add support for other frameworks warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.", FutureWarning, ) logits, boxes = outputs.logits, outputs.pred_boxes if len(logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(boxes) # Convert from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs: "OwlViTObjectDetectionOutput", threshold: float = 0.1, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, ): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. 
Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. """ batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes batch_size = len(batch_logits) if target_sizes is not None and len(target_sizes) != batch_size: raise ValueError("Make sure that you pass in as many target sizes as images") # batch_logits of shape (batch_size, num_queries, num_classes) batch_class_logits = torch.max(batch_logits, dim=-1) batch_scores = torch.sigmoid(batch_class_logits.values) batch_labels = batch_class_logits.indices # Convert to [x0, y0, x1, y1] format batch_boxes = center_to_corners_format(batch_boxes) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: batch_boxes = _scale_boxes(batch_boxes, target_sizes) results = [] for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes): keep = scores > threshold scores = scores[keep] labels = labels[keep] boxes = boxes[keep] results.append({"scores": scores, "labels": labels, "boxes": boxes}) return results def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. """ logits, target_boxes = outputs.logits, outputs.target_pred_boxes if target_sizes is not None and len(logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes is not None and target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) # Convert to [x0, y0, x1, y1] format target_boxes = center_to_corners_format(target_boxes) # Apply non-maximum suppression (NMS) if nms_threshold < 1.0: for idx in range(target_boxes.shape[0]): for i in torch.argsort(-scores[idx]): if not scores[idx][i]: continue ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0] ious[i] = -1.0 # Mask self-IoU. 
scores[idx][ious > nms_threshold] = 0.0 # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: target_boxes = _scale_boxes(target_boxes, target_sizes) # Compute box display alphas based on prediction scores results = [] alphas = torch.zeros_like(scores) for idx in range(target_boxes.shape[0]): # Select scores for boxes matching the current query: query_scores = scores[idx] if not query_scores.nonzero().numel(): continue # Apply threshold on scores before scaling query_scores[query_scores < threshold] = 0.0 # Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1. # All other boxes will either belong to a different query, or will not be shown. max_score = torch.max(query_scores) + 1e-6 query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9) query_alphas = torch.clip(query_alphas, 0.0, 1.0) alphas[idx] = query_alphas mask = alphas[idx] > 0 box_scores = alphas[idx][mask] boxes = target_boxes[idx][mask] results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results
class_definition
4,062
29,368
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/owlvit/image_processing_owlvit.py
null
7,242
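All of the detection post-processing methods above share the same recipe: take the per-query maximum over the class logits, squash it with a sigmoid, convert the predicted boxes from center format to corner format, and rescale them to absolute pixel coordinates. The sketch below replays that recipe on dummy tensors; the shapes, the 0.1 threshold, and the 480x640 target size are illustrative assumptions rather than values taken from the class.

```python
import torch

def center_to_corners(boxes: torch.Tensor) -> torch.Tensor:
    # (center_x, center_y, width, height) -> (x0, y0, x1, y1)
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

# Dummy "model outputs": 1 image, 2 queries, 3 classes, boxes in normalized (cx, cy, w, h)
logits = torch.tensor([[[2.0, -1.0, 0.5], [-3.0, -2.0, -1.0]]])
pred_boxes = torch.tensor([[[0.5, 0.5, 0.2, 0.4], [0.1, 0.1, 0.05, 0.05]]])

probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)   # per-query confidence
labels = probs.indices                 # best class per query

boxes = center_to_corners(pred_boxes)
img_h, img_w = 480, 640                # hypothetical target size (height, width)
scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
boxes = boxes * scale                  # relative [0, 1] -> absolute pixel coordinates

keep = scores[0] > 0.1                 # same default threshold as post_process_object_detection
print(scores[0][keep], labels[0][keep], boxes[0][keep])
```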
class VipLlavaCausalLMOutputWithPast(ModelOutput): """ Base class for VipLlava causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size (batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
class_definition
1,404
3,996
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vipllava/modeling_vipllava.py
null
7,243
class VipLlavaMultiModalProjector(nn.Module): def __init__(self, config: VipLlavaConfig): super().__init__() self.projector_layernorm = nn.LayerNorm( len(config.vision_feature_layers) * config.vision_config.hidden_size, eps=config.projector_layernorm_eps ) self.linear_1 = nn.Linear( len(config.vision_feature_layers) * config.vision_config.hidden_size, config.text_config.hidden_size, bias=True, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True) def forward(self, hidden_states): hidden_states = self.projector_layernorm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
class_definition
3,999
4,940
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vipllava/modeling_vipllava.py
null
7,244
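The projector above layer-normalizes the concatenation of several vision hidden layers and maps it into the language model's hidden size. The following shape-only sketch re-creates that flow with plain torch modules; the five selected layers, the 1024-wide vision features, the 576 patch tokens, and the 4096-wide text hidden size are assumed values for illustration.

```python
import torch
from torch import nn

# Assumed sizes: 5 selected vision layers of width 1024, text hidden size 4096
num_feature_layers, vision_hidden, text_hidden = 5, 1024, 4096

projector = nn.Sequential(
    nn.LayerNorm(num_feature_layers * vision_hidden, eps=1e-5),
    nn.Linear(num_feature_layers * vision_hidden, text_hidden),
    nn.GELU(),
    nn.Linear(text_hidden, text_hidden),
)

# One image, 576 patch tokens, features from the selected layers concatenated on the last dim
per_layer = [torch.randn(1, 576, vision_hidden) for _ in range(num_feature_layers)]
image_features = torch.cat(per_layer, dim=-1)   # (1, 576, 5 * 1024)
print(projector(image_features).shape)          # torch.Size([1, 576, 4096])
```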
class VipLlavaPreTrainedModel(PreTrainedModel): config_class = VipLlavaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["VipLlavaVisionAttention"] _skip_keys_device_placement = "past_key_values" _supports_cache_class = True _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): # important: this ported version of VipLlava isn't meant for training from scratch - only # inference and fine-tuning - so the proper init weights code has been removed - the original codebase # https://github.com/haotian-liu/LLaVA/tree/main/vipllava should serve for that purpose std = ( self.config.initializer_range if hasattr(self.config, "initializer_range") else self.config.text_config.initializer_range ) if hasattr(module, "class_embedding"): module.class_embedding.data.normal_(mean=0.0, std=std) if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
class_definition
6,115
7,520
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vipllava/modeling_vipllava.py
null
7,245
class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel, GenerationMixin): def __init__(self, config: VipLlavaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = VipLlavaMultiModalProjector(config) self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config(config.text_config) if self.language_model._tied_weights_keys is not None: self._tied_weights_keys = [f"language_model.{k}" for k in self.language_model._tied_weights_keys] self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() # Ignore copy def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layers: List[int]): """ Obtains image last hidden states from the vision tower and applies the multimodal projection. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. vision_feature_layers (`List[int]`): The list of indices of the layers to select the vision features from. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`. """ image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) # For VIP-llava, the image features are computed this way # We select the features from index 1: for the layers -2, -5, -8, -11 and 6 image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers] image_features = torch.cat(image_features, dim=-1) image_features = self.multi_modal_projector(image_features) return image_features def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels): num_images, num_image_patches, embed_dim = image_features.shape batch_size, sequence_length = input_ids.shape left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id)) # 1. Create a mask to know where special image tokens are special_image_token_mask = input_ids == self.config.image_token_index num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1) # Compute the maximum embed dimension max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index) # 2. Compute the positions where text should be written # Calculate new positions for text tokens in merged image-text sequence. # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens. # `torch.cumsum` computes how each image token shifts subsequent text token positions. # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. 
new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1 nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1] if left_padding: new_token_positions += nb_image_pad[:, None] # offset for left padding text_to_overwrite = new_token_positions[batch_indices, non_image_indices] # 3. Create the full embedding, already padded to the maximum position final_embedding = torch.zeros( batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device ) final_attention_mask = torch.zeros( batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device ) if labels is not None: final_labels = torch.full( (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device ) # In case the Vision model or the Language model has been offloaded to CPU, we need to manually # set the corresponding tensors into their correct target device. target_device = inputs_embeds.device batch_indices, non_image_indices, text_to_overwrite = ( batch_indices.to(target_device), non_image_indices.to(target_device), text_to_overwrite.to(target_device), ) attention_mask = attention_mask.to(target_device) # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"] # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices] final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices] if labels is not None: final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices] # 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835) image_to_overwrite = torch.full( (batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device ) image_to_overwrite[batch_indices, text_to_overwrite] = False if left_padding: image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device) else: mask = torch.ones_like(image_to_overwrite, dtype=torch.bool).cumsum(-1) - 1 padding_mask = mask <= new_token_positions[:, -1:].to(target_device) image_to_overwrite &= padding_mask if image_to_overwrite.sum() != image_features.shape[:-1].numel(): raise ValueError( f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while" f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation." ) final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device) final_attention_mask |= image_to_overwrite position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1) # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens. 
batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id) indices_to_mask = new_token_positions[batch_indices, pad_indices] final_embedding[batch_indices, indices_to_mask] = 0 if labels is None: final_labels = None return final_embedding, final_attention_mask, final_labels, position_ids @add_start_docstrings_to_model_forward(VIPLLAVA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=VipLlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) # Ignore copy def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layers: Optional[List[int]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, ) -> Union[Tuple, VipLlavaCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, VipLlavaForConditionalGeneration >>> model = VipLlavaForConditionalGeneration.from_pretrained("llava-hf/vip-llava-7b-hf", device_map="auto", torch_dtype=torch.float16) >>> processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf") >>> prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n{}###Assistant:" >>> question = "Can you please describe this image?" 
>>> prompt = prompt.format(question) >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16) >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=20) >>> processor.decode(generate_ids[0][len(inputs["input_ids"][0]):], skip_special_tokens=True) The image features a brown and white cat sitting on a green surface, with a red ball in its ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layers = ( vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers ) if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if pixel_values is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one" ) if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features( pixel_values=pixel_values, vision_feature_layers=vision_feature_layers ) n_image_tokens = (input_ids == self.config.image_token_index).sum().item() n_image_features = image_features.shape[0] * image_features.shape[1] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1) special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, num_logits_to_keep=num_logits_to_keep, ) logits = outputs[0] loss = None if labels is not None: # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device) ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return VipLlavaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) def 
prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, num_logits_to_keep=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model model_inputs = self.language_model.prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, num_logits_to_keep=num_logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values"] = pixel_values return model_inputs
class_definition
12,474
28,223
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vipllava/modeling_vipllava.py
null
7,246
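In the `forward` pass above, the projected image features are spliced into the text embeddings by expanding a boolean mask over the `<image>` placeholder positions and calling `masked_scatter` (the legacy `_merge_input_ids_with_image_features` path does the same job with explicit index bookkeeping). Below is a standalone sketch of just that splice; the token id, hidden size, and prompt are made up.

```python
import torch

# Toy prompt of 6 tokens where id 9 marks <image>, plus 2 image tokens of projected features
image_token_id, hidden = 9, 4
input_ids = torch.tensor([[1, 9, 9, 5, 6, 2]])                        # (batch, seq_len)
inputs_embeds = torch.randn(1, 6, hidden)                             # text embeddings
image_features = torch.arange(2 * hidden, dtype=torch.float).reshape(2, hidden)

special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
assert special_image_mask.sum() == image_features.numel()             # mirrors the token/feature count check above

merged = inputs_embeds.masked_scatter(special_image_mask, image_features)
print(merged[0, 1:3])   # the two <image> positions now hold the projected image features
```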
class VipLlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VipLlavaForConditionalGeneration`]. It is used to instantiate an VipLlava model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VipLlava-9B. e.g. [ybelkada/vip-llava-7b-hf](https://huggingface.co/ybelkada/vip-llava-7b-hf) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`VipLlavaVisionConfig`, *optional*): Custom vision config or dict text_config (`Union[AutoConfig, dict]`, *optional*): The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`. ignore_index (`int`, *optional*, defaults to -100): The ignore index for the loss function. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. projector_layernorm_eps (`float`, *optional*, defaults to 1e-05): The layer norm epsilon of the projector layernorm vision_feature_layers (`List[int]`, *optional*, defaults to `[-2, -5, -8, -11, 6]`): The list of layers to select the vision features from. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. Example: ```python >>> from transformers import VipLlavaForConditionalGeneration, VipLlavaConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a VipLlava vipllava-7b style configuration >>> configuration = VipLlavaConfig(vision_config, text_config) >>> # Initializing a model from the vipllava-7b style configuration >>> model = VipLlavaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vipllava" sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, text_config=None, ignore_index=-100, image_token_index=32000, projector_hidden_act="gelu", projector_layernorm_eps=1e-5, vision_feature_layers=[-2, -5, -8, -11, 6], image_seq_length=576, **kwargs, ): self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.projector_layernorm_eps = projector_layernorm_eps self.vision_feature_layers = vision_feature_layers self.image_seq_length = image_seq_length self.vision_config = vision_config if isinstance(self.vision_config, dict): vision_config["model_type"] = ( vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model" ) self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: self.vision_config = CONFIG_MAPPING["clip_vision_model"]( intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=336, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768, ) if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["llama"]() self.text_config = 
text_config super().__init__(**kwargs)
class_definition
886
5,181
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vipllava/configuration_vipllava.py
null
7,247
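As `__init__` above shows, both sub-configs may be passed as plain dicts and are resolved through `CONFIG_MAPPING`, falling back to `clip_vision_model` and `llama` when `model_type` is missing. A small sketch of that behaviour; the tiny hidden sizes are arbitrary.

```python
from transformers import VipLlavaConfig

# Sub-configs given as dicts; the vision dict has no "model_type", so it falls back
# to "clip_vision_model" exactly as in __init__ above. All sizes are arbitrary.
config = VipLlavaConfig(
    vision_config={"hidden_size": 64, "intermediate_size": 128, "num_hidden_layers": 2, "num_attention_heads": 2},
    text_config={"model_type": "llama", "hidden_size": 64, "num_hidden_layers": 2, "num_attention_heads": 2},
)
print(type(config.vision_config).__name__)   # CLIPVisionConfig
print(type(config.text_config).__name__)     # LlamaConfig
```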
class IJepaPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." f" Expected {self.num_channels} but got {num_channels}." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings
class_definition
1,617
3,569
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,248
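The patch embedding above is a strided convolution whose kernel and stride both equal the patch size, followed by a flatten and transpose into a token sequence. A shape-only sketch with the common 224/16/768 sizes, which are assumptions here rather than values read from a checkpoint.

```python
import torch
from torch import nn

image_size, patch_size, hidden_size = 224, 16, 768
projection = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(2, 3, image_size, image_size)
patches = projection(pixel_values)                        # (2, 768, 14, 14)
embeddings = patches.flatten(2).transpose(1, 2)           # (2, 196, 768)
print(embeddings.shape, (image_size // patch_size) ** 2)  # 196 tokens = num_patches
```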
class IJepaEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ def __init__(self, config: IJepaConfig, use_mask_token: bool = False) -> None: super().__init__() self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = IJepaPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] num_positions = self.position_embeddings.shape[1] # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings
class_definition
3,572
7,028
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,249
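`interpolate_pos_encoding` reshapes the learned `(1, num_patches, dim)` position table into a square grid, resizes it bicubically to the new patch grid, and flattens it back. A standalone sketch of that resize; the 196-position table and the 320x480 target resolution are assumptions.

```python
import torch
from torch import nn

num_positions, dim, patch = 196, 768, 16               # a 14x14 grid of 16x16 patches
pos_embed = torch.randn(1, num_positions, dim)

new_h, new_w = 320 // patch, 480 // patch              # e.g. a 320x480 input -> 20x30 patch grid
side = int(num_positions ** 0.5)
grid = pos_embed.reshape(1, side, side, dim).permute(0, 3, 1, 2)
grid = nn.functional.interpolate(grid, size=(new_h, new_w), mode="bicubic", align_corners=False)
resized = grid.permute(0, 2, 3, 1).view(1, -1, dim)
print(resized.shape)                                    # torch.Size([1, 600, 768])
```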
class IJepaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = IJepaConfig base_model_prefix = "ijepa" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["IJepaEmbeddings", "IJepaLayer"] _supports_sdpa = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, IJepaEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype)
class_definition
7,031
8,494
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,250
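`_init_weights` upcasts to `float32` before calling `trunc_normal_` and casts back afterwards, working around the missing half-precision CPU kernel mentioned in the comment. A minimal illustration of that round-trip with an assumed std of 0.02.

```python
import torch
from torch import nn

weight = torch.empty(768, 768, dtype=torch.float16)
# fp32 round-trip: initialise in float32, then cast back to the parameter's dtype
weight = nn.init.trunc_normal_(weight.to(torch.float32), mean=0.0, std=0.02).to(torch.float16)
print(weight.dtype, round(weight.float().std().item(), 3))   # torch.float16 0.02
```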
class IJepaSelfAttention(nn.Module): def __init__(self, config: IJepaConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class_definition
8,497
11,341
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,251
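The manual path above (matmul, scale by the square root of the head size, softmax, matmul) computes the same thing as `torch.nn.functional.scaled_dot_product_attention` with dropout off, which is what the SDPA variant in the next record delegates to. A quick numerical check on random tensors; the shapes are assumptions.

```python
import math
import torch
from torch import nn

# batch 2, 12 heads, 196 tokens, head size 64 (all assumed)
q, k, v = (torch.randn(2, 12, 196, 64) for _ in range(3))

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(q.size(-1))
manual = torch.matmul(nn.functional.softmax(scores, dim=-1), v)

fused = nn.functional.scaled_dot_product_attention(q, k, v, is_causal=False)
print(torch.allclose(manual, fused, atol=1e-4))   # expected: True
```

The manual implementation stays around because the fused kernel cannot return attention probabilities or apply a head mask, as the warning in the SDPA subclass notes.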
class IJepaSdpaSelfAttention(IJepaSelfAttention): def __init__(self, config: IJepaConfig) -> None: super().__init__(config) self.attention_probs_dropout_prob = config.attention_probs_dropout_prob def forward( self, hidden_states: torch.FloatTensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: if output_attentions or head_mask is not None: logger.warning_once( "`IJepaSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support " "`output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but " "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. " 'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions, ) mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) context_layer = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, head_mask, self.attention_probs_dropout_prob if self.training else 0.0, is_causal=False, scale=None, ) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return context_layer, None
class_definition
11,344
13,390
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,252
class IJepaSelfOutput(nn.Module): """ The residual connection is defined in IJepaLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: IJepaConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class_definition
13,393
14,042
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,253
class IJepaAttention(nn.Module): def __init__(self, config: IJepaConfig) -> None: super().__init__() self.attention = IJepaSelfAttention(config) self.output = IJepaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
14,045
15,730
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,254
class IJepaSdpaAttention(IJepaAttention): def __init__(self, config: IJepaConfig) -> None: super().__init__(config) self.attention = IJepaSdpaSelfAttention(config)
class_definition
15,733
15,916
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,255
class IJepaIntermediate(nn.Module): def __init__(self, config: IJepaConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
15,919
16,507
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,256
class IJepaOutput(nn.Module): def __init__(self, config: IJepaConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states
class_definition
16,510
17,041
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,257
class IJepaLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: IJepaConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = IJEPA_ATTENTION_CLASSES[config._attn_implementation](config) self.intermediate = IJepaIntermediate(config) self.output = IJepaOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in IJepa, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in IJepa, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs
class_definition
17,137
18,865
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,258
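As the inline comments above note, `IJepaLayer` is a pre-norm block: layernorm before self-attention, a residual add, layernorm before the MLP, and a second residual add. The skeleton below mirrors that ordering, with `nn.MultiheadAttention` standing in for `IJepaAttention`; all sizes are made up.

```python
import torch
from torch import nn

hidden, heads, mlp_dim = 768, 12, 3072
ln1, ln2 = nn.LayerNorm(hidden), nn.LayerNorm(hidden)
attn = nn.MultiheadAttention(hidden, heads, batch_first=True)   # stand-in for IJepaAttention
mlp = nn.Sequential(nn.Linear(hidden, mlp_dim), nn.GELU(), nn.Linear(mlp_dim, hidden))

x = torch.randn(2, 196, hidden)
normed = ln1(x)
attn_out, _ = attn(normed, normed, normed)
x = x + attn_out            # first residual connection
x = x + mlp(ln2(x))         # second residual connection
print(x.shape)              # torch.Size([2, 196, 768])
```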
class IJepaEncoder(nn.Module): def __init__(self, config: IJepaConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([IJepaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class_definition
18,868
20,795
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,259
class IJepaPooler(nn.Module): def __init__(self, config: IJepaConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class_definition
20,798
21,341
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,260
class IJepaModel(IJepaPreTrainedModel): def __init__(self, config: IJepaConfig, add_pooling_layer: bool = False, use_mask_token: bool = False): super().__init__(config) self.config = config self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token) self.encoder = IJepaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = IJepaPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> IJepaPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(IJEPA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?) 
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype if pixel_values.dtype != expected_dtype: pixel_values = pixel_values.to(expected_dtype) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
23,417
27,664
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,261
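Because the embeddings carry no CLS token, `IJepaModel` returns one hidden state per patch. A toy, randomly initialised configuration (not a released checkpoint) makes the resulting shape easy to see; this assumes `IJepaConfig` and `IJepaModel` are exported from the top-level `transformers` package like other vision models.

```python
import torch
from transformers import IJepaConfig, IJepaModel

config = IJepaConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2,
                     intermediate_size=128, image_size=64, patch_size=16)
model = IJepaModel(config)                     # add_pooling_layer defaults to False

pixel_values = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    out = model(pixel_values)
print(out.last_hidden_state.shape)             # (1, 16, 64): 4x4 patches, no CLS token
```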
class IJepaForImageClassification(IJepaPreTrainedModel): def __init__(self, config: IJepaConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.ijepa = IJepaModel(config, add_pooling_layer=False) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IJEPA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ijepa( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output.mean(dim=1)) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
28,336
32,033
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modeling_ijepa.py
null
7,262
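The classification head above averages the patch tokens and then picks its loss from `problem_type`: mean-squared error for regression, cross-entropy for single-label classification, and BCE-with-logits for multi-label targets. A standalone illustration of that dispatch on made-up logits and labels.

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(4, 3)                                                  # batch of 4, num_labels = 3

single = CrossEntropyLoss()(logits, torch.tensor([0, 2, 1, 1]))             # class indices
multi = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (4, 3)).float())    # multi-hot targets
regression = MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4))         # num_labels == 1

print(single.item(), multi.item(), regression.item())
```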
class IJepaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`IJepaModel`]. It is used to instantiate an IJEPA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the I-JEPA [google/ijepa-base-patch16-224](https://huggingface.co/google/ijepa-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. Example: ```python >>> from transformers import IJepaConfig, IJepaModel >>> # Initializing a IJEPA ijepa-base-patch16-224 style configuration >>> configuration = IJepaConfig() >>> # Initializing a model (with random weights) from the ijepa-base-patch16-224 style configuration >>> model = IJepaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "ijepa" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias
class_definition
714
4,800
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/configuration_ijepa.py
null
7,263
class IJepaEmbeddings(ViTEmbeddings): def __init__(self, config: IJepaConfig, use_mask_token: bool = False) -> None: super().__init__(config, use_mask_token) # Remove cls_token from IJepaEmbeddings, as it is not used in the model del self.cls_token num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] num_positions = self.position_embeddings.shape[1] # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings
class_definition
527
3,704
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modular_ijepa.py
null
7,264
class IJepaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = IJepaConfig base_model_prefix = "ijepa" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["IJepaEmbeddings", "IJepaLayer"] _supports_sdpa = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, IJepaEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype)
class_definition
3,707
5,170
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modular_ijepa.py
null
7,265
class IJepaModel(IJepaPreTrainedModel, ViTModel): def __init__(self, config: IJepaConfig, add_pooling_layer: bool = False, use_mask_token: bool = False): super().__init__(config) self.config = config self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token)
class_definition
5,986
6,286
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modular_ijepa.py
null
7,266
class IJepaForImageClassification(IJepaPreTrainedModel, ViTForImageClassification): def __init__(self, config: IJepaConfig): super().__init__(config) self.ijepa = IJepaModel(config, add_pooling_layer=False) self.post_init() def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ijepa( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output.mean(dim=1)) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
6,920
10,103
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/ijepa/modular_ijepa.py
null
7,267
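The loss selection in `IJepaForImageClassification.forward` infers `problem_type` from `num_labels` and the label dtype. A self-contained sketch of that dispatch, isolated from the model so the three branches are easy to exercise:

```python
# Minimal sketch of the problem_type dispatch used in the classification head above.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
    if num_labels == 1:
        # regression: one continuous target per image (labels expected as floats)
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if labels.dtype in (torch.long, torch.int):
        # single-label classification: one class index per image
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # multi-label classification: float 0/1 targets, one per class
    return BCEWithLogitsLoss()(logits, labels)

logits = torch.randn(4, 3)
print(classification_loss(logits, torch.tensor([0, 2, 1, 1]), num_labels=3))           # cross-entropy
print(classification_loss(logits, torch.randint(0, 2, (4, 3)).float(), num_labels=3))  # BCE with logits
```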
class Idefics3ImageProcessor(BaseImageProcessor): r""" Constructs an Idefics3 image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA. Only has an effect if the input image is in the PIL format. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the shortest edge resized to keep the input aspect ratio. size (`Dict`, *optional*, defaults to `{"longest_edge": 4 * 364}`): Controls the size of the output image. This is a dictionary containing the key "longest_edge". The image will be resized such that the longest edge is <= `size["longest_edge"]` and the shortest edge is resized to keep the input aspect ratio. resample (`Resampling`, *optional*, defaults to `Resampling.LANCZOS`): Resampling filter to use when resizing the image. do_image_splitting (`bool`, *optional*, defaults to `True`): Whether to split the image into sub-images concatenated with the original image. They are split into patches such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`. max_image_size (`Dict`, *optional*, defaults to `{"longest_edge": 364}`): Maximum resolution of the patches of images accepted by the model. This is a dictionary containing the key "longest_edge". do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1. rescale_factor (`float`, *optional*, defaults to `1/255`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and a standard deviation of `image_std`. image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether or not to pad the images to the largest height and width in the batch and number of images per sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
""" model_input_names = ["pixel_values"] def __init__( self, do_convert_rgb: bool = True, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.LANCZOS, do_image_splitting: bool = True, max_image_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) self.do_convert_rgb = do_convert_rgb self.do_resize = do_resize self.size = size if size is not None else {"longest_edge": 4 * 364} self.resample = resample self.do_image_splitting = do_image_splitting self.max_image_size = max_image_size if max_image_size is not None else {"longest_edge": 364} self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_pad = do_pad def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The longest edge of the image is resized to size["longest_edge"], with the shortest edge resized to keep the input aspect ratio. Can also be used with size["height"] and size["width"]. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4)) # For all transformations, we want to keep the same data format as the input image unless otherwise specified. # The resized image from PIL will always have channels last, so find the input format first. data_format = input_data_format if data_format is None else data_format if "longest_edge" in size: size = get_resize_output_image_size( image, resolution_max_side=size["longest_edge"], input_data_format=input_data_format ) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.") image_mode = None if image.ndim == 2 or image.shape[-1] == 1: image_mode = "P" image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format) resized_image = image.resize((size[1], size[0]), resample=resample) resized_image = np.array(resized_image) # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image # so we need to add it back if necessary. 
resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image # The image is always in channels last format after converting from a PIL image resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.LAST ) return resized_image def split_image( self, image, max_image_size: Dict[str, int], resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Split an image into squares of side max_image_size and the original image resized to max_image_size. That means that a single image becomes a sequence of images. This is a "trick" to spend more compute on each image with no changes in the vision encoder. 1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio. 2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)` sub-images of the same size each (image_size, image_size). Typically, 364x364. 3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width. Args: image (`np.ndarray`): Images to split. max_image_size (`Dict[str, int]`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" height, width = get_image_size(image, channel_dim=input_data_format) max_height = max_width = max_image_size["longest_edge"] frames = [] if height > max_height or width > max_width: # Calculate the number of splits num_splits_h = math.ceil(height / max_height) num_splits_w = math.ceil(width / max_width) # Calculate the optimal width and height for the sub-images optimal_height = math.ceil(height / num_splits_h) optimal_width = math.ceil(width / num_splits_w) # Iterate through each row and column for r in range(num_splits_h): for c in range(num_splits_w): # Calculate the starting point of the crop start_x = c * optimal_width start_y = r * optimal_height # Calculate the ending point of the crop end_x = min(start_x + optimal_width, width) end_y = min(start_y + optimal_height, height) # Crop the image cropped_image = _crop( image, start_x, start_y, end_x, end_y, data_format=data_format, ) frames.append(cropped_image) # For the global image at the end, we resize it to match the max_image_size, for cpu memory efficiency global_image_height, global_image_width = max_height, max_width if height != global_image_height or width != global_image_width: image = self.resize( image, {"height": global_image_height, "width": global_image_width}, resample=resample, input_data_format=data_format, ) else: num_splits_h, num_splits_w = 0, 0 frames.append(image) return frames, num_splits_h, num_splits_w def resize_for_vision_encoder( self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio. Args: image (`np.ndarray`): Images to resize. vision_encoder_max_size (`int`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred """ height, width = get_image_size(image, channel_dim=input_data_format) aspect_ratio = width / height if width >= height: width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size height = int(width / aspect_ratio) height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size elif height > width: height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size width = int(height * aspect_ratio) width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size new_size = {"height": height, "width": width} return self.resize( image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format ) def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. 
""" input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ For a list of images, for each images, pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width. For each sample in the batch, pads the sample with empty images to the max_number of images per sample in the batch. Optionally returns a pixel mask. Args: images (`List[np.ndarray]`): List of list of images to pad. Pads to the largest height and width in the batch. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) batch_size = len(images) max_num_images = max(len(images_) for images_ in images) input_data_format = ( infer_channel_dimension_format(images[0][0], num_channels=(1, 3, 4)) if input_data_format is None else input_data_format ) data_format = input_data_format if data_format is None else data_format if input_data_format == ChannelDimension.FIRST: n_channels = images[0][0].shape[0] elif input_data_format == ChannelDimension.LAST: n_channels = images[0][0].shape[-1] else: raise ValueError("Invalid channel dimension format.") def empty_image(size, input_data_format): if input_data_format == ChannelDimension.FIRST: return np.zeros((n_channels, *size), dtype=np.uint8) elif input_data_format == ChannelDimension.LAST: return np.zeros((*size, n_channels), dtype=np.uint8) padded_images_list = [ [empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size) ] padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)] for batch_idx in range(batch_size): for sample_idx, image in enumerate(images[batch_idx]): padded_images_list[batch_idx][sample_idx] = self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) padded_masks[batch_idx][sample_idx] = make_pixel_mask( image, output_size=pad_size, input_data_format=input_data_format ) padded_masks = padded_masks if return_pixel_mask else None return padded_images_list, padded_masks def preprocess( self, images: ImageInput, do_convert_rgb: Optional[bool] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_image_splitting: Optional[bool] = None, do_rescale: Optional[bool] = None, max_image_size: Optional[Dict[str, int]] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_row_col_info: bool = False, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess a batch of images. Args: images (`ImageInput`): A list of images to preprocess. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. With the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`): Whether to split the image into sub-images concatenated with the original image. They are split into patches such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`. max_image_size (`Dict`, *optional*, defaults to `self.max_image_size`): Maximum resolution of the images. If the image is larger than this size, the image is split into patches. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. 
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether or not to pad the images to the largest height and width in the batch. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. return_row_col_info (`bool`, *optional*, default to `False`): Whether to return the number of rows and columns of the split images. This is used for the `Idefics3Processor` to generate prompt strings based on the number of rows and columns. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting max_image_size = max_image_size if max_image_size is not None else self.max_image_size do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_pad = do_pad if do_pad is not None else self.do_pad images_list = make_list_of_images(images) if not valid_images(images_list[0]): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # save the palettes for conversion to RGB palettes_list = [ [im.getpalette() if isinstance(im, Image.Image) and im.mode == "P" else None for im in images] for images in images_list ] # All transformations expect numpy arrays. images_list = [[to_numpy_array(image) for image in images] for images in images_list] # Extra channel dimension for grayscale images if input_data_format in [ChannelDimension.LAST, None]: images_list = [ [np.expand_dims(img, axis=-1) if img.ndim == 2 else img for img in images] for images in images_list ] elif input_data_format == ChannelDimension.FIRST: images_list = [ [np.expand_dims(img, axis=0) if img.ndim == 2 else img for img in images] for images in images_list ] if do_rescale and is_scaled_image(images_list[0][0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) # We assume that all images have the same channel dimension format. if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0], num_channels=(1, 3, 4)) if do_resize: images_list = [ [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] for images in images_list ] if do_image_splitting: # We first resize both height and width of each image to the nearest max_image_size multiple, disregarding the aspect ratio # for size=(10, max_image_size) -> rescaled_size=(max_image_size, max_image_size) # for size=(11, max_image_size+1) -> rescaled_size=(max_image_size, max_image_size*2) images_list = [ [ self.resize_for_vision_encoder( image, max_image_size["longest_edge"], resample=resample, input_data_format=input_data_format ) for image in images ] for images in images_list ] images_list_split_arrays = [] palettes_list_split_arrays = [] images_list_rows = [] images_list_cols = [] for images, palettes in zip(images_list, palettes_list): split_image_arrays = [] split_palettes_arrays = [] image_rows = [] image_cols = [] for image, palette in zip(images, palettes): split_image_array, rows, cols = self.split_image( image, max_image_size=max_image_size, input_data_format=input_data_format, ) split_image_arrays.extend(split_image_array) split_palettes_arrays.extend([palette] * len(split_image_array)) image_rows.append(rows) image_cols.append(cols) images_list_split_arrays.append(split_image_arrays) palettes_list_split_arrays.append(split_palettes_arrays) images_list_rows.append(image_rows) images_list_cols.append(image_cols) images_list = images_list_split_arrays palettes_list = palettes_list_split_arrays else: # We square the images to max_image_size images_list = [ [ self.resize( image=image, size={"height": max_image_size["longest_edge"], "width": max_image_size["longest_edge"]}, resample=resample, input_data_format=input_data_format, ) for image in images ] for images in images_list ] images_list_rows = [[0] * len(images) for images in images_list] images_list_cols = [[0] * len(images) for images in images_list] if do_convert_rgb: images_list = [ [convert_to_rgb(img, palette) for img, palette in zip(images, palettes)] for images, palettes in zip(images_list, palettes_list) ] if do_rescale: images_list = [ [self.rescale(image, rescale_factor, 
input_data_format=input_data_format) for image in images] for images in images_list ] if do_normalize: images_list = [ [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] for images in images_list ] pixel_attention_mask = None if do_pad: images_list, pixel_attention_mask = self.pad( images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format ) if data_format is not None: images_list = [ [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] for images in images_list ] # Faster tensor conversion data = {"pixel_values": np.array(images_list) if do_pad and return_tensors is not None else images_list} if pixel_attention_mask is not None: data["pixel_attention_mask"] = ( np.array(pixel_attention_mask) if do_pad and return_tensors is not None else pixel_attention_mask ) encoding = BatchFeature(data=data, tensor_type=return_tensors) # This is needed for generating correct text inputs in the processor - we don't pad to the max number of images if return_row_col_info: encoding["rows"] = images_list_rows encoding["cols"] = images_list_cols return encoding
class_definition
10,321
42,407
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/image_processing_idefics3.py
null
7,268
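`split_image` above decides the crop grid purely from the image size and `max_image_size["longest_edge"]`. A geometry-only sketch of that decision (no image data; the helper name is ours, not part of the processor):

```python
# How many frames does Idefics3ImageProcessor.split_image produce for a given image size?
import math

def split_grid(height: int, width: int, max_side: int = 364):
    """Return (num_splits_h, num_splits_w, total_frames) for one image."""
    if height <= max_side and width <= max_side:
        return 0, 0, 1  # no splitting: only the (possibly resized) global image
    rows = math.ceil(height / max_side)
    cols = math.ceil(width / max_side)
    # rows * cols crops of roughly (height / rows, width / cols), plus one global image
    return rows, cols, rows * cols + 1

print(split_grid(364, 364))   # (0, 0, 1) -> a single image, no crops
print(split_grid(728, 1092))  # (2, 3, 7) -> 6 crops plus the resized global image
```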
class Idefics3ImagesKwargs(ImagesKwargs, total=False): return_row_col_info: Optional[bool] max_image_size: Optional[Dict[str, int]]
class_definition
2,941
3,080
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/processing_idefics3.py
null
7,269
class Idefics3ProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Idefics3ImagesKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "is_split_into_words": False, }, "images_kwargs": { "return_row_col_info": True, }, }
class_definition
3,083
3,436
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/processing_idefics3.py
null
7,270
class Idefics3Processor(ProcessorMixin): r""" Constructs an Idefics3 processor which wraps a Llama tokenizer and an Idefics3 image processor into a single processor. [`Idefics3Processor`] offers all the functionalities of [`Idefics3ImageProcessor`] and [`Idefics3TokenizerFast`]. See the docstring of [`~Idefics3Processor.__call__`] and [`~Idefics3Processor.decode`] for more information. Args: image_processor (`Idefics3ImageProcessor`): An instance of [`Idefics3ImageProcessor`]. The image processor is a required input. tokenizer (`PreTrainedTokenizerBase`, *optional*): An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input. image_seq_len (`int`, *optional*, defaults to 169): The length of the image sequence i.e. the number of <image> tokens per image in the input. This parameter is used to build the string from the input prompt and image tokens and should match the value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2)) chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "Idefics3ImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer=None, image_seq_len: int = 169, chat_template: str = None, **kwargs): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True) self.image_token = AddedToken("<image>", normalized=False, special=True) self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True) self.global_image_tag = "<global-img>" # https://github.com/huggingface/transformers/pull/32473/files/8063e5e17362571b693f1db95167f5443a3be1b2#r1734825341 self.image_seq_len = image_seq_len # This regex matches one or more occurrences of <global-img> tags (optionally surrounded by newline characters) # or <row_x_col_y> tags (where x and y are digits, also optionally surrounded by newline characters). self._regex_to_remove_extra_special_tokens = re.compile(r"(\n?<global-img>\n?|<row_\d+_col_\d+>\n?)+") tokens_to_add = { "additional_special_tokens": [ self.fake_image_token, self.image_token, self.end_of_utterance_token, ] } tokenizer.add_special_tokens(tokens_to_add) super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs) def _extract_images_from_prompts(self, prompts): prompt_images = [] for prompt in prompts: images = [] for elem in prompt: if is_valid_image(elem): images.append(elem) elif is_url(elem): images.append(load_image(elem)) prompt_images.append(images) return prompt_images def __call__( self, images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]] = None, text: Union[TextInput, "PreTokenizedInput", List[TextInput], List["PreTokenizedInput"]] = None, audio=None, videos=None, image_seq_len: Optional[int] = None, **kwargs: Unpack[Idefics3ProcessorKwargs], ) -> BatchEncoding: """ Processes the input prompts and returns a BatchEncoding.
Example: ```python >>> import requests >>> from transformers import Idefics3Processor >>> from transformers.image_utils import load_image >>> processor = Idefics3Processor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3") >>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg" >>> image1, image2 = load_image(url1), load_image(url2) >>> images = [[image1], [image2]] >>> text = [ ... "<image>In this image, we see", ... "bla bla bla<image>", ... ] >>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True) >>> input_ids = outputs.input_ids >>> input_tokens = processor.tokenizer.batch_decode(input_ids) >>> print(input_tokens) ['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>'] ``` Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. If it is of type `List[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1. text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Wherever an image token `<image>` is encountered, it is expanded to `<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`. image_seq_len (`int`, *optional*): The length of the image sequence. If not provided, the default value of self.image_seq_len is used. image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2)) return_tensors (`Union[str, TensorType]`, *optional*): If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more information. """ if text is None and images is None: raise ValueError("You must provide either `text` or `images`.") output_kwargs = self._merge_kwargs( Idefics3ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len n_images_in_text = [] n_images_in_images = [] inputs = BatchFeature() if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError("Invalid input text. Please provide a string, or a list of strings") n_images_in_text = [sample.count(self.image_token.content) for sample in text] if images is not None: if is_image_or_image_url(images): images = [[images]] elif isinstance(images, list) and is_image_or_image_url(images[0]): if text is not None: if sum(n_images_in_text) != len(images): raise ValueError( f"The total number of {self.image_token.content} tokens in the prompts should be the same as the number of images passed."
f" Found {sum(n_images_in_text)} {self.image_token.content} tokens and {len(images)} images." ) # Reorganize the images to match the prompts cumsum_images_in_text = [0] + list(accumulate(n_images_in_text)) images = [ images[cumsum_images_in_text[i] : cumsum_images_in_text[i + 1]] for i in range(len(n_images_in_text)) ] else: images = [images] elif ( not isinstance(images, list) and not isinstance(images[0], list) and not is_image_or_image_url(images[0][0]) ): raise ValueError( "Invalid input images. Please provide a single image or a list of images or a list of list of images." ) n_images_in_images = [len(sample) for sample in images] # Load images if they are URLs images = [[load_image(im) if is_url(im) else im for im in sample] for sample in images] image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) inputs.update(image_inputs) if text is not None: if n_images_in_images != n_images_in_text: raise ValueError( f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same." ) image_rows = inputs.pop("rows", [[0] * len(text)]) image_cols = inputs.pop("cols", [[0] * len(text)]) fake_image_token = self.fake_image_token.content image_token = self.image_token.content global_img_token = self.global_image_tag prompt_strings = [] for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols): # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len` image_prompt_strings = [] for n_rows, n_cols in zip(sample_rows, sample_cols): image_prompt_string = get_image_prompt_string( n_rows, n_cols, image_seq_len, image_token=image_token, fake_token_around_image=fake_image_token, global_img_token=global_img_token, ) image_prompt_strings.append(image_prompt_string) split_sample = sample.split(image_token) if len(split_sample) == 0: raise ValueError("The image token should be present in the text.") # Place in the image prompt strings where the image tokens are sample = split_sample[0] for i, image_prompt_string in enumerate(image_prompt_strings): sample += image_prompt_string + split_sample[i + 1] prompt_strings.append(sample) text_inputs = self.tokenizer(text=prompt_strings, **output_kwargs["text_kwargs"]) inputs.update(text_inputs) elif text is not None: if any(n_images_in_text): raise ValueError( f"Found {sum(n_images_in_text)} {self.image_token.content} tokens in the text but no images were passed." ) text_inputs = self.tokenizer(text=text, **output_kwargs["text_kwargs"]) inputs.update(text_inputs) return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Idefics3TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ batched_decode_output = self.tokenizer.batch_decode(*args, **kwargs) return [self._regex_to_remove_extra_special_tokens.sub("<image>", s) for s in batched_decode_output] def decode(self, *args, **kwargs): """ This method forwards all its arguments to Idefics3TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ decode_output = self.tokenizer.decode(*args, **kwargs) return self._regex_to_remove_extra_special_tokens.sub("<image>", decode_output) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_definition
3,549
16,592
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/processing_idefics3.py
null
7,271
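A hedged sketch of the `<image>` expansion performed in `__call__` for the unsplit case (`n_rows == n_cols == 0`): the placeholder is replaced by the fake-image wrapper, the global image tag, and `image_seq_len` copies of the image token. This re-implements the string layout locally instead of importing the module's private helper, so treat it as an approximation of the docstring example above:

```python
# Local approximation of the prompt expansion for an unsplit image.
def expand_image_token(
    image_seq_len: int = 169,
    image_token: str = "<image>",
    fake_token: str = "<fake_token_around_image>",
    global_img_token: str = "<global-img>",
) -> str:
    # unsplit case: <fake><global-img> + <image> * image_seq_len + <fake>
    return f"{fake_token}{global_img_token}{image_token * image_seq_len}{fake_token}"

text = "<image>In this image, we see"
expanded = text.replace("<image>", expand_image_token(image_seq_len=3), 1)
print(expanded)
# <fake_token_around_image><global-img><image><image><image><fake_token_around_image>In this image, we see
```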
class Idefics3VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Idefics3VisionModel`]. It is used to instantiate a Idefics3 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics3 model [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers.models.idefics3.modeling_idefics3 import Idefics3VisionTransformer >>> from transformers.models.idefics3.configuration_idefics3 import Idefics3VisionConfig >>> # Initializing a Idefics3VisionConfig with google/siglip-base-patch16-224 style configuration >>> configuration = Idefics3VisionConfig() >>> # Initializing a Idefics3VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = Idefics3VisionTransformer(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "idefics3_vision" base_config_key = "vision_config" def __init__( self, hidden_size=1152, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=16, num_channels=3, image_size=224, patch_size=32, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range
class_definition
829
4,886
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/configuration_idefics3.py
null
7,272
class Idefics3Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Idefics3Model`]. It is used to instantiate a Idefics3 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Idefics3 [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should cache the key/value pairs of the attention mechanism. Only relevant if `config.is_decoder=True`. image_token_id (`int`, *optional*, defaults to 128257): The id of the "image" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to tie the word embeddings with the token embeddings. vision_config (`IdeficsVisionConfig` or `dict`, *optional*, defaults to `IdeficsVisionConfig`): Custom vision config or dict for the vision tower text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`): Custom text config or dict for the text model scale_factor (`int`, *optional*, defaults to 2): The scale factor for the image encoder. pad_token_id (`int`, *optional*, defaults to 128002): The id of the padding token. Example: ```python >>> from transformers import Idefics3Model, Idefics3Config >>> # Initializing configuration >>> configuration = Idefics3Config() >>> # Initializing a model from the configuration >>> model = Idefics3Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "idefics3" sub_configs = {"text_config": AutoConfig, "vision_config": Idefics3VisionConfig} def __init__( self, use_cache=True, image_token_id=128257, tie_word_embeddings=False, vision_config=None, text_config=None, scale_factor=2, pad_token_id=128_002, **kwargs, ): self.image_token_id = image_token_id self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings if vision_config is None: self.vision_config = Idefics3VisionConfig() logger.info("vision_config is None, using default vision config") elif isinstance(vision_config, dict): self.vision_config = Idefics3VisionConfig(**vision_config) elif isinstance(vision_config, Idefics3VisionConfig): self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: logger.info("text_config is None, using default text config") text_config = CONFIG_MAPPING["llama"]( rms_norm_eps=1e-5, pad_token_id=pad_token_id, tie_word_embeddings=False, ) self.text_config = text_config self.scale_factor = scale_factor super().__init__(**kwargs, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings)
class_definition
4,889
8,541
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/configuration_idefics3.py
null
7,273
class Idefics3BaseModelOutputWithPast(ModelOutput): """ Base class for Idefics3 model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
1,636
4,499
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,274
class Idefics3CausalLMOutputWithPast(ModelOutput): """ Base class for Idefics causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
4,513
7,128
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,275
class Idefics3VisionEmbeddings(nn.Module): """ This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable resolution. The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304) which allows treating images in their native aspect ratio and without the need to resize them to the same fixed size. In particular, we start from the original pre-trained SigLIP model (which uses fixed-size square images) and adapt it by training on images of variable resolutions. """ def __init__(self, config: Idefics3VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.num_patches_per_side = self.image_size // self.patch_size self.num_patches = self.num_patches_per_side**2 self.num_positions = self.num_patches self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor: batch_size, _, max_im_h, max_im_w = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side) position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0) for batch_idx, p_attn_mask in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w) bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True) bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True) pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings
class_definition
7,241
10,074
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,276
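The forward pass above assigns position ids to a variable number of valid patches by bucketizing fractional coordinates onto the fixed grid the position embedding was trained with. A standalone sketch of just that computation, using the `image_size=224`, `patch_size=32` defaults from `Idefics3VisionConfig` (a 7x7 grid) and an assumed 3x5 region of valid patches:

```python
# Variable-resolution position ids via fractional coordinates + bucketize.
import torch

num_patches_per_side = 7           # image_size=224, patch_size=32 (the config defaults shown above)
nb_patches_h, nb_patches_w = 3, 5  # assumed number of valid (unpadded) patches for one image

boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)
frac_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)  # fractional row coordinates in [0, 1)
frac_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)  # fractional column coordinates in [0, 1)

bucket_h = torch.bucketize(frac_h, boundaries, right=True)
bucket_w = torch.bucketize(frac_w, boundaries, right=True)
pos_ids = (bucket_h[:, None] * num_patches_per_side + bucket_w).flatten()
print(pos_ids)  # 15 position ids indexing into the fixed 7x7 position-embedding grid
```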
class Idefics3VisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__ def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) # Ignore copy self.is_causal = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) k_v_seq_len = key_states.shape[-2] attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len): raise ValueError( f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights
class_definition
10,178
13,667
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,277
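A standalone sanity check of the eager attention math in `Idefics3VisionAttention`: softmax(Q K^T * scale) V with `scale = head_dim**-0.5` matches PyTorch's fused `scaled_dot_product_attention` in the non-causal, unmasked case (head sizes below follow the vision config defaults, 1152 hidden / 16 heads):

```python
# Eager attention vs. PyTorch's fused kernel on random tensors.
import torch
import torch.nn.functional as F

batch, heads, seq, head_dim = 2, 16, 10, 72  # 1152 hidden / 16 heads = 72
q, k, v = (torch.randn(batch, heads, seq, head_dim) for _ in range(3))

scale = head_dim**-0.5
weights = torch.softmax(torch.matmul(q, k.transpose(2, 3)) * scale, dim=-1)
eager_out = torch.matmul(weights, v)

fused_out = F.scaled_dot_product_attention(q, k, v)  # also scales by head_dim**-0.5 by default
print(torch.allclose(eager_out, fused_out, atol=1e-4))  # True, up to numerical tolerance
```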
class Idefics3VisionFlashAttention2(Idefics3VisionAttention): """ Idefics3Vision flash attention module. This module inherits from `Idefics3VisionAttention` as the weights of the module stay untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x num_heads x head_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states get silently cast to float32. Hence, we need # to cast them back to the correct dtype just to be sure everything works as expected. # This might slow down training & inference, so it is recommended not to cast the LayerNorms # in fp32. (Idefics3VisionRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seem to be silently cast to float32; this might be related to" f" the fact that you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}."
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights
class_definition
13,785
18,512
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,278
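A shape-level sketch of the layout juggling performed in the flash-attention forward above: projections come out as (batch, seq, hidden), the flash-attention kernel expects (batch, seq, num_heads, head_dim), and the cache-style layout is (batch, num_heads, seq, head_dim), hence the transposes. All sizes below are illustrative, not taken from any checkpoint.

```python
import torch

# Illustrative sizes only; real values come from the vision config.
bsz, q_len, num_heads, head_dim = 2, 16, 12, 64

hidden = torch.randn(bsz, q_len, num_heads * head_dim)        # projection output
flash_layout = hidden.view(bsz, q_len, num_heads, head_dim)   # layout flash attention expects
cache_layout = flash_layout.transpose(1, 2)                   # (bsz, num_heads, q_len, head_dim)

print(flash_layout.shape)  # torch.Size([2, 16, 12, 64])
print(cache_layout.shape)  # torch.Size([2, 12, 16, 64])
```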
class Idefics3VisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class_definition
18,745
19,325
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,279
class Idefics3SimpleMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        input_size = config.vision_config.hidden_size * (config.scale_factor**2)
        output_size = config.text_config.hidden_size
        self.proj = nn.Linear(input_size, output_size, bias=False)

    def forward(self, x):
        return self.proj(x)
class_definition
19,328
19,678
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,280
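To make the sizing of this projection concrete, here is a small sketch of how the input width is derived; the hidden sizes and scale factor are made-up illustrative values, not the released checkpoint's configuration.

```python
from torch import nn

# Hypothetical sizes for illustration only.
vision_hidden_size = 1152
text_hidden_size = 4096
scale_factor = 2

# After pixel shuffle, each position carries scale_factor**2 patches worth of features.
proj = nn.Linear(vision_hidden_size * scale_factor**2, text_hidden_size, bias=False)
print(proj.weight.shape)  # torch.Size([4096, 4608])
```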
class Idefics3EncoderLayer(nn.Module): def __init__(self, config: Idefics3VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Idefics3VisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
19,787
21,778
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,281
class Idefics3Encoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Idefics3EncoderLayer`]. Args: config: Idefics3Config """ def __init__(self, config: Idefics3Config): super().__init__() self.config = config self.layers = nn.ModuleList([Idefics3EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Ignore copy def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
class_definition
21,874
25,767
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,282
class Idefics3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Idefics3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class_definition
26,533
27,259
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,283
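A minimal numerical check of what this normalization does, written as a standalone sketch rather than through the class: each hidden vector is divided by its root mean square (computed in float32), so normalized vectors have RMS close to 1 before the learned scale is applied.

```python
import torch

hidden = torch.randn(2, 4, 8)
variance = hidden.to(torch.float32).pow(2).mean(-1, keepdim=True)
normed = hidden * torch.rsqrt(variance + 1e-6)

# Every per-vector RMS is ~1.0 after normalization.
print(normed.pow(2).mean(-1).sqrt())
```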
class Idefics3Connector(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.scale_factor = config.scale_factor
        self.modality_projection = Idefics3SimpleMLP(config)

    def pixel_shuffle(self, x, scale_factor=2):
        bsz, seq, embed_dim = x.size()
        height = width = int(seq**0.5)
        x = x.view(bsz, height, width, embed_dim)
        x = x.view(bsz, height, int(width / scale_factor), embed_dim * scale_factor)
        x = x.permute(0, 2, 1, 3)
        x = x.reshape(bsz, int(width / scale_factor), int(height / scale_factor), embed_dim * (scale_factor**2))
        x = x.permute(0, 2, 1, 3)
        x = x.reshape(bsz, int(seq / (scale_factor**2)), embed_dim * (scale_factor**2))
        return x

    def forward(self, image_hidden_states):
        image_hidden_states = self.pixel_shuffle(image_hidden_states, self.scale_factor)
        image_hidden_states = self.modality_projection(image_hidden_states)
        return image_hidden_states
class_definition
27,262
28,258
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,284
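A standalone sketch of the pixel-shuffle step, using the same reshapes as the method above on made-up sizes: a 24x24 patch grid with embed_dim 32 and scale_factor 2 collapses to 144 positions of width 128.

```python
import torch

def pixel_shuffle(x, scale_factor=2):
    # Mirrors the reshapes above: folds scale_factor**2 neighbouring patches
    # into the channel dimension, shrinking the sequence accordingly.
    bsz, seq, embed_dim = x.size()
    height = width = int(seq**0.5)
    x = x.view(bsz, height, width, embed_dim)
    x = x.view(bsz, height, int(width / scale_factor), embed_dim * scale_factor)
    x = x.permute(0, 2, 1, 3)
    x = x.reshape(bsz, int(width / scale_factor), int(height / scale_factor), embed_dim * (scale_factor**2))
    x = x.permute(0, 2, 1, 3)
    return x.reshape(bsz, int(seq / (scale_factor**2)), embed_dim * (scale_factor**2))

x = torch.randn(1, 576, 32)        # 24 x 24 patches, embed_dim = 32 (illustrative)
print(pixel_shuffle(x, 2).shape)   # torch.Size([1, 144, 128])
```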
class Idefics3PreTrainedModel(PreTrainedModel):
    config_class = Idefics3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Idefics3VisionAttention", "Idefics3DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True

    # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2PreTrainedModel._init_weights
    def _init_weights(self, module):
        std = (
            self.config.text_config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
class_definition
29,320
30,559
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,285
class Idefics3VisionTransformer(Idefics3PreTrainedModel): config_class = Idefics3VisionConfig _supports_sdpa = False def __init__(self, config: Idefics3VisionConfig): super().__init__(config) embed_dim = config.hidden_size self.embeddings = Idefics3VisionEmbeddings(config) self.encoder = Idefics3Encoder(config) self.patch_size = config.patch_size self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2VisionTransformer.get_input_embeddings def get_input_embeddings(self): return self.embeddings # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2VisionTransformer.set_input_embeddings def set_input_embeddings(self, value): self.embeddings = value def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.patch_size patch_attention_mask = torch.ones( ( batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size, ) ) patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device) hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) patch_attention_mask = patch_attention_mask.view(batch_size, -1) # The call to `_upad_input` in `_flash_attention_forward` is expensive # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence), # avoiding passing the attention_mask, which is equivalent to attending to the full sequence if not torch.any(~patch_attention_mask): patch_attention_mask = None elif not self._use_flash_attention_2: patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) if not return_dict: return (last_hidden_state,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
31,596
35,063
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,286
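A small sketch of the default patch_attention_mask built above when none is passed: one boolean per patch_size x patch_size tile of the image, later flattened to one entry per patch. The image and patch sizes below are illustrative.

```python
import torch

batch_size, patch_size = 2, 14                       # illustrative values
pixel_values = torch.randn(batch_size, 3, 224, 196)

patch_attention_mask = torch.ones(
    (batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size),
    dtype=torch.bool,
)
print(patch_attention_mask.shape)                     # torch.Size([2, 16, 14])
print(patch_attention_mask.view(batch_size, -1).shape)  # one entry per patch: torch.Size([2, 224])
```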
class Idefics3Model(Idefics3PreTrainedModel): def __init__(self, config: Idefics3Config): super().__init__(config) self.padding_idx = self.config.text_config.pad_token_id self.vocab_size = self.config.text_config.vocab_size self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config) self.connector = Idefics3Connector(config) self.text_model = AutoModel.from_config(config.text_config) self.image_seq_len = int( ((config.vision_config.image_size // config.vision_config.patch_size) ** 2) / (config.scale_factor**2) ) self.image_token_id = self.config.image_token_id self._use_flash_attention_2 = config.text_config._attn_implementation == "flash_attention_2" self.post_init() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.enable_input_require_grads def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for lora when using gradient checkpointing. c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032 Override to set output.requires_grad = True for both the decoder's and vision model's embeddings. """ def get_lowest_module(module): if len(list(module.children())) == 0: # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.) return module else: # Recursively call the function on each child module return get_lowest_module(list(module.children())[0]) def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook( make_inputs_require_grads ) # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.disable_input_require_grads def disable_input_require_grads(self): self._text_require_grads_hook.remove() self._vision_require_grads_hook.remove() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.get_input_embeddings def get_input_embeddings(self): return self.text_model.get_input_embeddings() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.set_input_embeddings def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) def inputs_merger( self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor], ): """ This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM. The merging happens as follows: - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`. - We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space. We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer. - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_toke_around_image vector_tok_4`. That sequence is fed to the LM. - To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states. 
""" num_images, _, vision_hidden_size = image_hidden_states.shape special_image_token_mask = input_ids == self.image_token_id # Fixes RuntimeError: a leaf Variable that requires grad is being used in an in-place operation. new_inputs_embeds = inputs_embeds.clone() reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size) # cast to the dtype of the input_embeds to support quantized models reshaped_image_hidden_states = reshaped_image_hidden_states.to(inputs_embeds.device, inputs_embeds.dtype) new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states return new_inputs_embeds @add_start_docstrings_to_model_forward( """ Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where max_num_images is the maximum number of images among the batch_size samples in the batch. Padding images are not needed beyond padding the pixel_values at the entrance of the model. For efficiency, we only pass through the vision_model's forward the real images by discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3. """, IDEFICS3_INPUTS_DOCSTRING, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Idefics3BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.training and self.text_model.gradient_checkpointing and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # retrieve input_ids and inputs_embeds if input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") past_seen_tokens = 0 if use_cache: if past_key_values is None: past_key_values = DynamicCache() past_seen_tokens = past_key_values.get_seq_length() if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0: raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.") if inputs_embeds is None: inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(self.device) # START VISUAL INPUTS INTEGRATION if pixel_values is not None and image_hidden_states is not None: raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time") elif pixel_values is not None: batch_size, num_images, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:]) # Remove padding images - padding images are full 0. nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image pixel_values = pixel_values[real_images_inds].contiguous() # Handle the vision attention mask if pixel_attention_mask is None: pixel_attention_mask = torch.ones( size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device, ) else: # Remove padding images from the mask pixel_attention_mask = pixel_attention_mask.view( batch_size * num_images, *pixel_attention_mask.shape[2:] ) pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, ).last_hidden_state # Modality projection & resampling image_hidden_states = self.connector(image_hidden_states) elif image_hidden_states is not None: image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device) if past_seen_tokens == 0 and inputs_embeds is not None and image_hidden_states is not None: # When we generate, we don't want to replace the potential image_token_id that we generated by images # that simply don't exist inputs_embeds = self.inputs_merger( input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states, ) outputs = self.text_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return tuple(v for v in [*outputs, image_hidden_states] if v is not None) return Idefics3BaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states, )
class_definition
39,903
51,379
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,287
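A toy sketch of the `inputs_merger` logic: positions whose token id equals the image token id are overwritten with the (already projected) image hidden states. The id 5 and the tiny sizes are illustrative only, not the model's real values.

```python
import torch

image_token_id = 5                                   # illustrative id
input_ids = torch.tensor([[1, 5, 5, 5, 2]])
inputs_embeds = torch.zeros(1, 5, 8)
image_hidden_states = torch.randn(1, 3, 8)           # one image -> 3 image positions

special_image_token_mask = input_ids == image_token_id
new_inputs_embeds = inputs_embeds.clone()            # avoid in-place op on a leaf tensor
new_inputs_embeds[special_image_token_mask] = image_hidden_states.view(-1, 8)

print(torch.equal(new_inputs_embeds[0, 1:4], image_hidden_states[0]))  # True
```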
class Idefics3ForConditionalGeneration(Idefics3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.__init__ with Idefics2->Idefics3 def __init__(self, config): super().__init__(config) self.model = Idefics3Model(config) self.image_token_id = self.config.image_token_id self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.vocab_size = config.text_config.vocab_size # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.enable_input_require_grads def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook( make_inputs_require_grads ) # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.disable_input_require_grads def disable_input_require_grads(self): self._text_require_grads_hook.remove() self._vision_require_grads_hook.remove() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.get_input_embeddings def get_input_embeddings(self): return self.model.text_model.get_input_embeddings() # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.set_input_embeddings def set_input_embeddings(self, value): self.model.text_model.set_input_embeddings(value) # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.get_output_embeddings def get_output_embeddings(self): return self.lm_head # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(IDEFICS3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Idefics3CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Idefics3CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`). Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: Example: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from io import BytesIO >>> from transformers import AutoProcessor, AutoModelForVision2Seq >>> from transformers.image_utils import load_image >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg") >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg") >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg") >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3") >>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3", torch_dtype=torch.bfloat16, device_map="auto") >>> # Create inputs >>> messages = [ ... { ... "role": "user", ... "content": [ ... {"type": "image"}, ... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."}, ... {"type": "image"}, ... {"type": "text", "text": "What can we see in this image?"}, ... ] ... }, ... { ... "role": "user", ... "content": [ ... {"type": "image"}, ... {"type": "text", "text": "In which city is that bridge located?"}, ... ] ... } ... ] >>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages] >>> images = [[image1, image2], [image3]] >>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device) >>> # Generate >>> generated_ids = model.generate(**inputs, max_new_tokens=256) >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True) >>> print(generated_texts[0]) Assistant: There are buildings, trees, lights, and water visible in this image. >>> print(generated_texts[1]) Assistant: The bridge is in San Francisco. ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() labels = labels.to(logits.device) # Shift so that tokens < n predict n if attention_mask is not None: # we use the input attention mask to shift the logits and labels, because it is 2D. 
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Idefics3CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, num_logits_to_keep=None, **kwargs, ): # Overwritten -- there are mutually exclusive inputs (if the logic to make `image_hidden_states` take # precedence is moved to the model, we can remove this fn) # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens if past_key_values is not None: if inputs_embeds is not None: # Exception 1 input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step # but IDEFICS requires noth ids and embeds to be present if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": input_ids} else: # The clone here is for the same reason as for `position_ids`. 
model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} if num_logits_to_keep is not None: model_inputs["num_logits_to_keep"] = num_logits_to_keep if image_hidden_states is not None: pixel_values = None pixel_attention_mask = None else: pixel_values = pixel_values pixel_attention_mask = pixel_attention_mask model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "pixel_values": pixel_values, "pixel_attention_mask": pixel_attention_mask, "image_hidden_states": image_hidden_states, } ) return model_inputs # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration._update_model_kwargs_for_generation def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs): model_kwargs = super()._update_model_kwargs_for_generation( outputs=outputs, model_kwargs=model_kwargs, is_encoder_decoder=is_encoder_decoder, **kwargs, ) # Get the precomputed image_hidden_states model_kwargs["image_hidden_states"] = outputs.image_hidden_states return model_kwargs
class_definition
51,576
64,148
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/idefics3/modeling_idefics3.py
null
7,288
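A minimal sketch of the shifted-label loss used in the forward above when a 2D attention mask is provided: tokens < n predict token n, and padded positions are dropped before the cross entropy. Shapes are toy values.

```python
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 5, 10)                       # (batch, seq_len, vocab)
labels = torch.randint(0, 10, (1, 5))
attention_mask = torch.tensor([[1, 1, 1, 1, 0]])     # last position is padding

shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1):]
shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()

loss = CrossEntropyLoss()(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
print(loss)
```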
class PatchTSTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an PatchTST model according to the specified arguments, defining the model architecture. [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_input_channels (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. context_length (`int`, *optional*, defaults to 32): The context length of the input sequence. distribution_output (`str`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". loss (`str`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". patch_length (`int`, *optional*, defaults to 1): Define the patch length of the patchification process. patch_stride (`int`, *optional*, defaults to 1): Define the stride of the patchification process. num_hidden_layers (`int`, *optional*, defaults to 3): Number of hidden layers. d_model (`int`, *optional*, defaults to 128): Dimensionality of the transformer layers. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. share_embedding (`bool`, *optional*, defaults to `True`): Sharing the input embedding across all channels. channel_attention (`bool`, *optional*, defaults to `False`): Activate channel attention block in the Transformer to allow channels to attend each other. ffn_dim (`int`, *optional*, defaults to 512): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. norm_type (`str` , *optional*, defaults to `"batchnorm"`): Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the attention probabilities. positional_dropout (`float`, *optional*, defaults to 0.0): The dropout probability in the positional embedding layer. path_dropout (`float`, *optional*, defaults to 0.0): The dropout path in the residual block. ff_dropout (`float`, *optional*, defaults to 0.0): The dropout probability used between the two layers of the feed-forward networks. bias (`bool`, *optional*, defaults to `True`): Whether to add bias in the feed-forward networks. activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported. pre_norm (`bool`, *optional*, defaults to `True`): Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is applied after residual block. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. use_cls_token (`bool`, *optional*, defaults to `False`): Whether cls token is used. 
init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. share_projection (`bool`, *optional*, defaults to `True`): Sharing the projection layer across different channels in the forecast head. scaling (`Union`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". do_mask_input (`bool`, *optional*): Apply masking during the pretraining. mask_type (`str`, *optional*, defaults to `"random"`): Masking type. Only `"random"` and `"forecast"` are currently supported. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio applied to mask the input data during random pretraining. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. channel_consistent_masking (`bool`, *optional*, defaults to `False`): If channel consistent masking is True, all the channels will have the same masking pattern. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked during pretraining. Values in the list are number between 1 and `num_input_channels` mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. pooling_type (`str`, *optional*, defaults to `"mean"`): Pooling of the embedding. `"mean"`, `"max"` and `None` are supported. head_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for head. prediction_length (`int`, *optional*, defaults to 24): The prediction horizon that the model will output. num_targets (`int`, *optional*, defaults to 1): Number of targets for regression and classification tasks. For classification, it is the number of classes. output_range (`list`, *optional*): Output range for regression task. The range of output values can be set to enforce the model to produce values within a range. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples is generated in parallel for probabilistic prediction. 
```python >>> from transformers import PatchTSTConfig, PatchTSTModel >>> # Initializing an PatchTST configuration with 12 time steps for prediction >>> configuration = PatchTSTConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtst" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_attention_heads", "num_hidden_layers": "num_hidden_layers", } def __init__( self, # time series specific configuration num_input_channels: int = 1, context_length: int = 32, distribution_output: str = "student_t", loss: str = "mse", # PatchTST arguments patch_length: int = 1, patch_stride: int = 1, # Transformer architecture configuration num_hidden_layers: int = 3, d_model: int = 128, num_attention_heads: int = 4, share_embedding: bool = True, channel_attention: bool = False, ffn_dim: int = 512, norm_type: str = "batchnorm", norm_eps: float = 1e-05, attention_dropout: float = 0.0, positional_dropout: float = 0.0, path_dropout: float = 0.0, ff_dropout: float = 0.0, bias: bool = True, activation_function: str = "gelu", pre_norm: bool = True, positional_encoding_type: str = "sincos", use_cls_token: bool = False, init_std: float = 0.02, share_projection: bool = True, scaling: Optional[Union[str, bool]] = "std", # mask pretraining do_mask_input: Optional[bool] = None, mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], channel_consistent_masking: Optional[bool] = False, unmasked_channel_indices: Optional[List[int]] = None, mask_value: int = 0, # head pooling_type: str = "mean", head_dropout: float = 0.0, prediction_length: int = 24, num_targets: int = 1, output_range: Optional[List] = None, # distribution head num_parallel_samples: int = 100, **kwargs, ): # time series specific configuration self.context_length = context_length self.num_input_channels = num_input_channels # n_vars self.loss = loss self.distribution_output = distribution_output self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.d_model = d_model self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.attention_dropout = attention_dropout self.share_embedding = share_embedding self.channel_attention = channel_attention self.norm_type = norm_type self.norm_eps = norm_eps self.positional_dropout = positional_dropout self.path_dropout = path_dropout self.ff_dropout = ff_dropout self.bias = bias self.activation_function = activation_function self.pre_norm = pre_norm self.positional_encoding_type = positional_encoding_type self.use_cls_token = use_cls_token self.init_std = init_std self.scaling = scaling # PatchTST parameters self.patch_length = patch_length self.patch_stride = patch_stride # Mask pretraining self.do_mask_input = do_mask_input self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio # for random masking self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking self.channel_consistent_masking = channel_consistent_masking self.unmasked_channel_indices = unmasked_channel_indices self.mask_value = mask_value # general head params self.pooling_type = pooling_type self.head_dropout = head_dropout # For prediction head self.share_projection = share_projection self.prediction_length = prediction_length # For prediction 
and regression head self.num_parallel_samples = num_parallel_samples # Regression self.num_targets = num_targets self.output_range = output_range super().__init__(**kwargs)
class_definition
847
12,283
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/configuration_patchtst.py
null
7,289
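The number of patches is not a config field; it follows from `context_length`, `patch_length` and `patch_stride` (see `PatchTSTPatchify` further down). A quick arithmetic sketch with illustrative values:

```python
# num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1
context_length, patch_length, patch_stride = 32, 8, 8
num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1
print(num_patches)  # 4 non-overlapping patches

# With an overlapping stride the count grows:
print((max(context_length, patch_length) - patch_length) // 4 + 1)  # 7 patches at stride 4
```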
class PatchTSTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PatchTSTConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value
class_definition
1,269
8,667
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,290
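A shape-level sketch of the bmm attention above: heads are folded into the batch dimension so a single bmm computes all (bsz * num_heads) attention maps, then the output is folded back to (bsz, tgt_len, embed_dim). All sizes are illustrative.

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 16, 32     # illustrative sizes

q = torch.randn(bsz * num_heads, tgt_len, head_dim) * head_dim**-0.5
k = torch.randn(bsz * num_heads, tgt_len, head_dim)
v = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)  # (bsz*heads, tgt, src)
attn_output = torch.bmm(attn_weights, v)                                # (bsz*heads, tgt, head_dim)
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2)

print(attn_output.reshape(bsz, tgt_len, num_heads * head_dim).shape)    # torch.Size([2, 16, 128])
```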
class PatchTSTBatchNorm(nn.Module):
    """
    Compute batch normalization over the sequence length (time) dimension.
    """

    def __init__(self, config: PatchTSTConfig):
        super().__init__()
        self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)

    def forward(self, inputs: torch.Tensor):
        """
        Parameters:
            inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
                input for Batch norm calculation
        Returns:
            `torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
        """
        output = inputs.transpose(1, 2)  # output: (batch_size, d_model, sequence_length)
        output = self.batchnorm(output)
        return output.transpose(1, 2)
class_definition
8,670
9,437
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,291
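A usage-style sketch of normalizing over the time dimension: nn.BatchNorm1d expects (batch, channels, length), hence the transpose before and after. Sizes are illustrative.

```python
import torch
from torch import nn

inputs = torch.randn(4, 16, 128)            # (batch_size, sequence_length, d_model)
batchnorm = nn.BatchNorm1d(128, eps=1e-5)

output = batchnorm(inputs.transpose(1, 2)).transpose(1, 2)
print(output.shape)                          # torch.Size([4, 16, 128])
```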
class PatchTSTPatchify(nn.Module): """ A class to patchify the time series sequence into different patches Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ def __init__(self, config: PatchTSTConfig): super().__init__() self.sequence_length = config.context_length self.patch_length = config.patch_length self.patch_stride = config.patch_stride if self.sequence_length <= self.patch_length: raise ValueError( f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" ) # get the number of patches self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) self.sequence_start = self.sequence_length - new_sequence_length def forward(self, past_values: torch.Tensor): """ Parameters: past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): Input for patchification Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ sequence_length = past_values.shape[-2] if sequence_length != self.sequence_length: raise ValueError( f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." ) # output: [bs x new_sequence_length x num_channels] output = past_values[:, self.sequence_start :, :] # output: [bs x num_patches x num_input_channels x patch_length] output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) # output: [bs x num_input_channels x num_patches x patch_length] output = output.transpose(-2, -3).contiguous() return output
class_definition
14,779
16,826
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,292
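A standalone sketch of the unfold-based patchification with toy sizes: a context of 32 steps over 3 channels, with patch_length = patch_stride = 8, yields 4 patches per channel.

```python
import torch

past_values = torch.randn(2, 32, 3)                  # (batch, sequence_length, num_channels)
patch_length, patch_stride = 8, 8

patches = past_values.unfold(dimension=-2, size=patch_length, step=patch_stride)
patches = patches.transpose(-2, -3).contiguous()     # (batch, num_channels, num_patches, patch_length)
print(patches.shape)                                 # torch.Size([2, 3, 4, 8])
```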
class PatchTSTMasking(nn.Module): """ Class to perform random or forecast masking. Parameters: config (`PatchTSTConfig`): model config Returns: x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ def __init__(self, config: PatchTSTConfig): super().__init__() self.random_mask_ratio = config.random_mask_ratio self.channel_consistent_masking = config.channel_consistent_masking self.mask_type = config.mask_type self.num_forecast_mask_patches = config.num_forecast_mask_patches self.unmasked_channel_indices = config.unmasked_channel_indices self.mask_value = config.mask_value if self.unmasked_channel_indices is not None: self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input Return: masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ if self.mask_type == "random": masked_input, mask = random_masking( inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value, ) elif self.mask_type == "forecast": masked_input, mask = forecast_masking( inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value, ) else: raise ValueError(f"Invalid mask type {self.mask_type}.") # mask: [bs x num_input_channels x num_patch] mask = mask.bool() return masked_input, mask
class_definition
16,829
19,315
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,293
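The `random_masking` and `forecast_masking` helpers are defined elsewhere in the file and are not shown here; the following is a simplified stand-in for the random case, only to illustrate the shapes involved (it is not the library implementation and ignores options like channel-consistent masking or unmasked channel indices).

```python
import torch

patch_input = torch.randn(2, 3, 8, 16)        # (bs, num_channels, num_patches, patch_length)
mask_ratio, mask_value = 0.5, 0

# Draw an independent Bernoulli mask per (sample, channel, patch) and overwrite masked patches.
mask = torch.rand(patch_input.shape[:-1]) < mask_ratio       # True = masked
masked_input = patch_input.masked_fill(mask.unsqueeze(-1), float(mask_value))

print(mask.shape, masked_input.shape)         # torch.Size([2, 3, 8]) torch.Size([2, 3, 8, 16])
```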
class PatchTSTEncoderLayer(nn.Module):
    """
    PatchTST encoder layer
    """

    def __init__(self, config: PatchTSTConfig):
        super().__init__()

        self.channel_attention = config.channel_attention
        # Multi-Head attention
        self.self_attn = PatchTSTAttention(
            embed_dim=config.d_model,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
        )

        # Add & Norm of the sublayer 1
        self.dropout_path1 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
        if config.norm_type == "batchnorm":
            self.norm_sublayer1 = PatchTSTBatchNorm(config)
        elif config.norm_type == "layernorm":
            self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
        else:
            raise ValueError(f"{config.norm_type} is not a supported norm layer type.")

        # Add & Norm of the sublayer 2
        if self.channel_attention:
            self.dropout_path2 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
            if config.norm_type == "batchnorm":
                self.norm_sublayer2 = PatchTSTBatchNorm(config)
            elif config.norm_type == "layernorm":
                self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
            else:
                raise ValueError(f"{config.norm_type} is not a supported norm layer type.")

        # Position-wise Feed-Forward
        self.ff = nn.Sequential(
            nn.Linear(config.d_model, config.ffn_dim, bias=config.bias),
            ACT2CLS[config.activation_function](),
            nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(),
            nn.Linear(config.ffn_dim, config.d_model, bias=config.bias),
        )

        # Add & Norm of sublayer 3
        self.dropout_path3 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
        if config.norm_type == "batchnorm":
            self.norm_sublayer3 = PatchTSTBatchNorm(config)
        elif config.norm_type == "layernorm":
            self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
        else:
            raise ValueError(f"{config.norm_type} is not a supported norm layer type.")

        self.pre_norm = config.pre_norm

    def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None):
        """
        Parameters:
            hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*):
                Past values of the time series
            output_attentions (`bool`, *optional*):
                Whether or not to return the output attention of all layers
        Return:
            `torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`
        """
        batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape

        # First sublayer: attention across time
        # hidden_states: [(bs*num_channels) x sequence_length x d_model]
        hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)

        if self.pre_norm:
            ## Norm and Multi-Head attention and Add residual connection
            attn_output, attn_weights, _ = self.self_attn(
                hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions
            )
            # Add: residual connection with residual dropout
            hidden_state = hidden_state + self.dropout_path1(attn_output)
        else:
            ## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT
            attn_output, attn_weights, _ = self.self_attn(
                hidden_states=hidden_state, output_attentions=output_attentions
            )
            # hidden_states: [(bs*num_channels) x sequence_length x d_model]
            hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output))

        # hidden_state: [bs x num_channels x sequence_length x d_model]
        hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)

        # Second sublayer: attention across variables at any given time
        if self.channel_attention:
            # hidden_state: [bs x sequence_length x num_channels x d_model]
            hidden_state = hidden_state.transpose(2, 1).contiguous()
            # hidden_state: [(bs*sequence_length) x num_channels x d_model]
            hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model)
            if self.pre_norm:
                ## Norm and Multi-Head attention and Add residual connection
                attn_output, channel_attn_weights, _ = self.self_attn(
                    hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions
                )
                # Add: residual connection with residual dropout
                hidden_state = hidden_state + self.dropout_path2(attn_output)
            else:
                ## Multi-Head attention and Add residual connection and Norm
                attn_output, channel_attn_weights, _ = self.self_attn(
                    hidden_states=hidden_state, output_attentions=output_attentions
                )
                # hidden_states: [(bs*sequence_length) x num_channels x d_model]
                hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output))

            # Reshape hidden state
            # hidden_state: [bs x sequence_length x num_channels x d_model]
            hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model)
            # hidden_state: [bs x num_channels x sequence_length x d_model]
            hidden_state = hidden_state.transpose(1, 2).contiguous()

        # Third sublayer: mixing across the hidden dimension
        # hidden_state: [(batch_size*num_channels) x sequence_length x d_model]
        hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)
        if self.pre_norm:
            ## Norm and Position-wise Feed-Forward and Add residual connection
            # Add: residual connection with residual dropout
            hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state)))
        else:
            ## Position-wise Feed-Forward and Add residual connection and Norm
            # Add: residual connection with residual dropout
            hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state)))

        # [bs x num_channels x sequence_length x d_model]
        hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)

        outputs = (hidden_state,)
        if output_attentions:
            outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,)

        return outputs
class_definition
19,318
26,346
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,294
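Note (illustrative, not part of the corpus record above): the encoder layer routes the same shared attention module over two different foldings of the input, attention across time per channel and, optionally, attention across channels per time step. The standalone sketch below uses arbitrary, assumed tensor sizes to show how the [batch, channels, patches, d_model] layout is reshaped for each pass and restored afterwards.

import torch

# Illustrative sizes (assumptions, not taken from any PatchTST config)
batch_size, num_channels, num_patches, d_model = 2, 3, 5, 8
hidden = torch.randn(batch_size, num_channels, num_patches, d_model)

# Time attention: every channel becomes an independent sequence of patches
time_view = hidden.view(batch_size * num_channels, num_patches, d_model)

# Channel attention: every time step becomes an independent sequence of channels
chan_view = hidden.transpose(2, 1).contiguous().view(batch_size * num_patches, num_channels, d_model)

# Both views fold back to the original 4-D layout without losing information
restored = chan_view.view(batch_size, num_patches, num_channels, d_model).transpose(1, 2).contiguous()
assert restored.shape == hidden.shape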
class PatchTSTPreTrainedModel(PreTrainedModel):
    config_class = PatchTSTConfig
    base_model_prefix = "model"
    main_input_name = "past_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """
        Initialize weights
        """
        if isinstance(module, PatchTSTPositionalEncoding):
            # initialize cls_token
            if self.config.use_cls_token:
                nn.init.normal_(module.cls_token, std=0.02)
            # initialize positional encoding
            if self.config.positional_encoding_type == "random":
                nn.init.normal_(module.position_enc, mean=0.0, std=0.1)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, PatchTSTBatchNorm):
            module.batchnorm.bias.data.zero_()
            module.batchnorm.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=self.config.init_std)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (PatchTSTEncoder)):
            module.gradient_checkpointing = value
class_definition
26,349
27,664
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,295
class PatchTSTEmbedding(nn.Module):
    def __init__(self, config: PatchTSTConfig):
        super().__init__()
        self.num_input_channels = config.num_input_channels
        self.share_embedding = config.share_embedding
        # Input encoding: projection of feature vectors onto a d-dim vector space
        if self.share_embedding:
            self.input_embedding = nn.Linear(config.patch_length, config.d_model)
        else:
            self.input_embedding = nn.ModuleList()
            for _ in range(config.num_input_channels):
                self.input_embedding.append(nn.Linear(config.patch_length, config.d_model))

    def forward(self, patch_input: torch.Tensor):
        """
        Parameters:
            patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
                Patch input for embedding
        return:
            `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`
        """
        # Input encoding
        num_input_channels = patch_input.shape[1]
        if num_input_channels != self.num_input_channels:
            raise ValueError(
                f"The defined number of input channels ({self.num_input_channels}) in the config "
                f"has to be the same as the number of channels in the batch input ({num_input_channels})"
            )
        if self.share_embedding:
            embeddings = self.input_embedding(patch_input)  # x: [bs x num_channels x num_patches x d_model]
        else:
            embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)]
            embeddings = torch.stack(embeddings, dim=1)
        return embeddings
class_definition
27,667
29,391
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,296
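Note (illustrative, not part of the corpus record above): the embedding simply projects each patch of length patch_length to d_model, either with one shared linear layer or one layer per channel. The small shape check below, with hypothetical sizes, shows that both paths yield the same output layout.

import torch
import torch.nn as nn

# Assumed illustrative sizes
batch_size, num_channels, num_patches, patch_length, d_model = 2, 3, 5, 12, 16
patches = torch.randn(batch_size, num_channels, num_patches, patch_length)

# Shared embedding: one projection applied to every channel
shared = nn.Linear(patch_length, d_model)
out_shared = shared(patches)  # [bs x num_channels x num_patches x d_model]

# Per-channel embeddings: one projection per channel, stacked back on dim=1
per_channel = nn.ModuleList(nn.Linear(patch_length, d_model) for _ in range(num_channels))
out_per_channel = torch.stack(
    [per_channel[i](patches[:, i]) for i in range(num_channels)], dim=1
)
assert out_shared.shape == out_per_channel.shape == (batch_size, num_channels, num_patches, d_model)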
class PatchTSTPositionalEncoding(nn.Module):
    """
    Class for positional encoding
    """

    def __init__(self, config: PatchTSTConfig, num_patches: int):
        super().__init__()
        self.use_cls_token = config.use_cls_token
        self.num_input_channels = config.num_input_channels
        if config.use_cls_token:
            # cls_token: [1 x num_input_channels x 1 x d_model]
            self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model))
            num_patches += 1
        # positional encoding: [num_patches x d_model]
        self.position_enc = self._init_pe(config, num_patches)
        # Positional dropout
        self.positional_dropout = (
            nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity()
        )

    @staticmethod
    def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter:
        # Positional encoding
        if config.positional_encoding_type == "random":
            position_enc = nn.Parameter(torch.randn(num_patches, config.d_model), requires_grad=True)
        elif config.positional_encoding_type == "sincos":
            position_enc = torch.zeros(num_patches, config.d_model)
            position = torch.arange(0, num_patches).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
            position_enc[:, 0::2] = torch.sin(position * div_term)
            position_enc[:, 1::2] = torch.cos(position * div_term)
            position_enc = position_enc - position_enc.mean()
            position_enc = position_enc / (position_enc.std() * 10)
            position_enc = nn.Parameter(position_enc, requires_grad=False)
        else:
            raise ValueError(
                f"{config.positional_encoding_type} is not a valid positional encoder. "
                f"Available types are 'random' and 'sincos'."
            )
        return position_enc

    def forward(self, patch_input: torch.Tensor):
        if self.use_cls_token:
            # patch_input: [bs x num_channels x num_patches x d_model]
            patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :])
            # append cls token where cls_token: [1 x num_channels x 1 x d_model]
            cls_token = self.cls_token + self.position_enc[:1, :]
            # get the same copy of cls_token for all the samples in batch: [bs x num_channels x 1 x d_model]
            cls_tokens = cls_token.expand(patch_input.shape[0], self.num_input_channels, -1, -1)
            # hidden_state: [bs x num_channels x (num_patches+1) x d_model]
            hidden_state = torch.cat((cls_tokens, patch_input), dim=2)
        else:
            # hidden_state: [bs x num_channels x num_patches x d_model]
            hidden_state = self.positional_dropout(patch_input + self.position_enc)
        return hidden_state
class_definition
29,394
32,272
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,297
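Note (illustrative, not part of the corpus record above): for the "sincos" option, _init_pe builds the standard sinusoidal table and then centers and rescales it before freezing. A minimal sketch of that computation outside the class, with arbitrary assumed sizes, is:

import math
import torch

num_patches, d_model = 16, 64  # arbitrary illustrative sizes

position_enc = torch.zeros(num_patches, d_model)
position = torch.arange(0, num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)  # even dimensions
position_enc[:, 1::2] = torch.cos(position * div_term)  # odd dimensions

# PatchTST additionally centers the table and shrinks its scale before freezing it
position_enc = (position_enc - position_enc.mean()) / (position_enc.std() * 10)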
class PatchTSTEncoder(PatchTSTPreTrainedModel):
    """
    PatchTST Encoder
    """

    def __init__(self, config: PatchTSTConfig, num_patches: int):
        super().__init__(config)
        self.gradient_checkpointing = False

        # Input embedding: projection of feature vectors onto a d-dim vector space
        self.embedder = PatchTSTEmbedding(config)
        # Positional encoding
        self.positional_encoder = PatchTSTPositionalEncoding(config, num_patches)
        # Encoder
        self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.num_hidden_layers)])

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        patch_input: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
    ) -> BaseModelOutput:
        """
        Parameters:
            patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
                Past values of the time series
            output_hidden_states (bool, optional): Indicates whether hidden states should be returned.
            output_attentions (bool, optional): Indicates whether attentions should be returned.
        return:
            `BaseModelOutput`
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # Input embedding
        patch_input = self.embedder(patch_input)
        # Positional encoding
        hidden_state = self.positional_encoder(patch_input)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_state,)

            layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions)
            # get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model]
            # or [bs x num_channels x (num_patches+1) x d_model] if use cls_token
            hidden_state = layer_outputs[0]
            # append attention matrix at each layer
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # return past_values, hidden_states
        return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions)
class_definition
32,275
34,951
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,298
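Note (illustrative, not part of the corpus record above): putting the pieces together, the encoder maps patched inputs of shape (batch, channels, num_patches, patch_length) to hidden states of shape (batch, channels, num_patches, d_model), with one extra position when a cls token is used. A rough usage sketch under assumed config values follows; num_patches is normally derived from context_length, patch_length and patch_stride, and the concrete numbers here are placeholders.

import torch
from transformers import PatchTSTConfig
from transformers.models.patchtst.modeling_patchtst import PatchTSTEncoder

# Config field values below are arbitrary assumptions for illustration
config = PatchTSTConfig(num_input_channels=3, patch_length=12, d_model=64, num_hidden_layers=2)
num_patches = 10  # placeholder; in the full model this comes from the patchifier
encoder = PatchTSTEncoder(config, num_patches=num_patches)

patches = torch.randn(4, config.num_input_channels, num_patches, config.patch_length)
out = encoder(patch_input=patches)
# (4, 3, num_patches, 64), or num_patches + 1 along dim 2 if config.use_cls_token is True
print(out.last_hidden_state.shape)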
class PatchTSTModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Parameters:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the
            model at the output of each layer plus the optional initial embedding outputs.
        mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
            Bool masked tensor indicating which patches are masked
        loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
            Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
        scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
            Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
        patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
            Patched input to the Transformer
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    mask: torch.FloatTensor = None
    loc: torch.FloatTensor = None
    scale: torch.FloatTensor = None
    patch_input: torch.FloatTensor = None
class_definition
35,847
37,693
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtst/modeling_patchtst.py
null
7,299