# From src/transformers/models/deprecated/deta/modeling_deta.py
        self.input_proj = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(intermediate_channel_sizes[-1], config.d_model, kernel_size=1),
                    nn.GroupNorm(32, config.d_model),
                )
            ]
        )

        if not config.two_stage:
            self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)

        self.encoder = DetaEncoder(config)
        self.decoder = DetaDecoder(config)

        self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))

        if config.two_stage:
            self.enc_output = nn.Linear(config.d_model, config.d_model)
            self.enc_output_norm = nn.LayerNorm(config.d_model)
            self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
            self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
            self.pix_trans = nn.Linear(config.d_model, config.d_model)
            self.pix_trans_norm = nn.LayerNorm(config.d_model)
        else:
            self.reference_points = nn.Linear(config.d_model, 2)

        self.assign_first_stage = config.assign_first_stage
        self.two_stage_num_proposals = config.two_stage_num_proposals

        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def freeze_backbone(self):
        for name, param in self.backbone.model.named_parameters():
            param.requires_grad_(False)

    def unfreeze_backbone(self):
        for name, param in self.backbone.model.named_parameters():
            param.requires_grad_(True)

    def get_valid_ratio(self, mask, dtype=torch.float32):
        """Get the valid ratio of all feature maps."""
        _, height, width = mask.shape
        valid_height = torch.sum(mask[:, :, 0], 1)
        valid_width = torch.sum(mask[:, 0, :], 1)
        valid_ratio_height = valid_height.to(dtype) / height
        valid_ratio_width = valid_width.to(dtype) / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
        return valid_ratio

    def get_proposal_pos_embed(self, proposals):
        """Get the position embedding of the proposals."""
        num_pos_feats = self.config.d_model // 2
        temperature = 10000
        scale = 2 * math.pi

        dim_t = torch.arange(num_pos_feats, dtype=torch.int64, device=proposals.device).float()
        dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
        # batch_size, num_queries, 4
        proposals = proposals.sigmoid() * scale
        # batch_size, num_queries, 4, 128
        pos = proposals[:, :, :, None] / dim_t
        # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        """Generate the encoder output proposals from encoded enc_output.

        Args:
            enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
            padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
            spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.

        Returns:
            `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
                - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used
                  to directly predict a bounding box. (without the need of a decoder)
                - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
                  sigmoid.
        """
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        level_ids = []
        for level, (height, width) in enumerate(spatial_shapes):
            mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = meshgrid(
                torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
                torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
                indexing="ij",
            )
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
            level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level)
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))  # inverse sigmoid
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
        # assign each pixel as an object query
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        object_query = self.enc_output_norm(self.enc_output(object_query))
        level_ids = torch.cat(level_ids)
        return object_query, output_proposals, level_ids
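For orientation, here is a minimal standalone sketch of the sinusoidal embedding that `get_proposal_pos_embed` above computes; the `d_model = 256` value is the documented config default, and the batch/query sizes are arbitrary. Each of the four box coordinates is expanded into 128 sine/cosine features, giving a 512-dimensional embedding per query.

```python
import math
import torch

# Illustrative re-implementation of the logic in get_proposal_pos_embed (not the library API itself)
d_model = 256                       # assumed config.d_model (the documented default)
num_pos_feats = d_model // 2
temperature = 10000
scale = 2 * math.pi

proposals = torch.randn(1, 300, 4)  # (batch_size, num_queries, 4) box logits, arbitrary sizes
dim_t = torch.arange(num_pos_feats).float()
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)

pos = proposals.sigmoid() * scale   # map logits into [0, 2*pi]
pos = pos[:, :, :, None] / dim_t    # (1, 300, 4, 128)
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
print(pos.shape)                    # torch.Size([1, 300, 512]) = 4 coordinates x 128 features
```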
    @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DetaModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.FloatTensor] = None,
        encoder_outputs: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], DetaModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, DetaModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365")
        >>> model = DetaModel.from_pretrained("jozhang97/deta-swin-large-o365", two_stage=False)

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 900, 256]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, num_channels, height, width = pixel_values.shape
        device = pixel_values.device

        if pixel_mask is None:
            pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)

        # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper)
        # First, send pixel_values + pixel_mask through the backbone to obtain the features,
        # which is a list of tuples
        features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)

        # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
        sources = []
        masks = []
        for level, (source, mask) in enumerate(features):
            sources.append(self.input_proj[level](source))
            masks.append(mask)
            if mask is None:
                raise ValueError("No attention mask was provided")
        # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
        if self.config.num_feature_levels > len(sources):
            _len_sources = len(sources)
            for level in range(_len_sources, self.config.num_feature_levels):
                if level == _len_sources:
                    source = self.input_proj[level](features[-1][0])
                else:
                    source = self.input_proj[level](sources[-1])
                mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
                sources.append(source)
                masks.append(mask)
                position_embeddings_list.append(pos_l)

        # Create queries
        query_embeds = None
        if not self.config.two_stage:
            query_embeds = self.query_position_embeddings.weight

        # Prepare encoder inputs (by flattening)
        spatial_shapes = [(source.shape[2:]) for source in sources]
        source_flatten = [source.flatten(2).transpose(1, 2) for source in sources]
        mask_flatten = [mask.flatten(1) for mask in masks]

        lvl_pos_embed_flatten = []
        for level, pos_embed in enumerate(position_embeddings_list):
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)

        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        valid_ratios = valid_ratios.float()
        # Fourth, send source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder
        # Also provide spatial_shapes, level_start_index and valid_ratios
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs_embeds=source_flatten,
                attention_mask=mask_flatten,
                position_embeddings=lvl_pos_embed_flatten,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                valid_ratios=valid_ratios,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # Fifth, prepare decoder inputs
        batch_size, _, num_channels = encoder_outputs[0].shape
        enc_outputs_class = None
        enc_outputs_coord_logits = None
        output_proposals = None
        if self.config.two_stage:
            object_query_embedding, output_proposals, level_ids = self.gen_encoder_output_proposals(
                encoder_outputs[0], ~mask_flatten, spatial_shapes
            )

            # hack implementation for two-stage DETA
            # apply a detection head to each pixel (A.4 in paper)
            # linear projection for bounding box binary classification (i.e. foreground and background)
            enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
            # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch)
            delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
            enc_outputs_coord_logits = delta_bbox + output_proposals

            # only keep top scoring `config.two_stage_num_proposals` proposals
            topk = self.two_stage_num_proposals
            proposal_logit = enc_outputs_class[..., 0]

            if self.assign_first_stage:
                proposal_boxes = center_to_corners_format(enc_outputs_coord_logits.sigmoid().float()).clamp(0, 1)
                topk_proposals = []
                for b in range(batch_size):
                    prop_boxes_b = proposal_boxes[b]
                    prop_logits_b = proposal_logit[b]

                    # pre-nms per-level topk
                    pre_nms_topk = 1000
                    pre_nms_inds = []
                    for lvl in range(len(spatial_shapes)):
                        lvl_mask = level_ids == lvl
                        pre_nms_inds.append(torch.topk(prop_logits_b.sigmoid() * lvl_mask, pre_nms_topk)[1])
                    pre_nms_inds = torch.cat(pre_nms_inds)

                    # nms on topk indices
                    post_nms_inds = batched_nms(
                        prop_boxes_b[pre_nms_inds], prop_logits_b[pre_nms_inds], level_ids[pre_nms_inds], 0.9
                    )
                    keep_inds = pre_nms_inds[post_nms_inds]

                    if len(keep_inds) < self.two_stage_num_proposals:
                        print(
                            f"[WARNING] nms proposals ({len(keep_inds)}) < {self.two_stage_num_proposals}, running"
                            " naive topk"
                        )
                        keep_inds = torch.topk(proposal_logit[b], topk)[1]

                    # keep top Q/L indices for L levels
                    q_per_l = topk // len(spatial_shapes)
                    is_level_ordered = (
                        level_ids[keep_inds][None]
                        == torch.arange(len(spatial_shapes), device=level_ids.device)[:, None]
                    )
                    keep_inds_mask = is_level_ordered & (is_level_ordered.cumsum(1) <= q_per_l)  # LS
                    keep_inds_mask = keep_inds_mask.any(0)  # S

                    # pad to Q indices (might let ones filtered from pre-nms sneak by... unlikely because we pick high conf anyways)
                    if keep_inds_mask.sum() < topk:
                        num_to_add = topk - keep_inds_mask.sum()
                        pad_inds = (~keep_inds_mask).nonzero()[:num_to_add]
                        keep_inds_mask[pad_inds] = True

                    keep_inds_topk = keep_inds[keep_inds_mask]
                    topk_proposals.append(keep_inds_topk)
                topk_proposals = torch.stack(topk_proposals)
            else:
                topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]

            topk_coords_logits = torch.gather(
                enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
            )
            topk_coords_logits = topk_coords_logits.detach()
            reference_points = topk_coords_logits.sigmoid()
            init_reference_points = reference_points
            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
            query_embed, target = torch.split(pos_trans_out, num_channels, dim=2)

            topk_feats = torch.stack(
                [object_query_embedding[b][topk_proposals[b]] for b in range(batch_size)]
            ).detach()
            target = target + self.pix_trans_norm(self.pix_trans(topk_feats))
        else:
            query_embed, target = torch.split(query_embeds, num_channels, dim=1)
            query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
            target = target.unsqueeze(0).expand(batch_size, -1, -1)
            reference_points = self.reference_points(query_embed).sigmoid()
            init_reference_points = reference_points

        decoder_outputs = self.decoder(
            inputs_embeds=target,
            position_embeddings=query_embed,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=mask_flatten,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            valid_ratios=valid_ratios,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None)
            tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs

            return tuple_outputs

        return DetaModelOutput(
            init_reference_points=init_reference_points,
            last_hidden_state=decoder_outputs.last_hidden_state,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            intermediate_reference_points=decoder_outputs.intermediate_reference_points,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            enc_outputs_class=enc_outputs_class,
            enc_outputs_coord_logits=enc_outputs_coord_logits,
            output_proposals=output_proposals,
        )
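The released DETA checkpoints enable `two_stage`, so the `DetaModelOutput` constructed above also exposes the first-stage proposal tensors. A hedged sketch of inspecting them (the checkpoint name is the one from the docstring example; exact shapes depend on the image and configuration, so none are asserted here):

```python
from transformers import AutoImageProcessor, DetaModel
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365")
model = DetaModel.from_pretrained("jozhang97/deta-swin-large-o365")  # two_stage left at the checkpoint default

outputs = model(**image_processor(images=image, return_tensors="pt"))

# These fields are only populated in the two-stage setting (see the forward pass above)
print(outputs.init_reference_points.shape)     # initial reference boxes handed to the decoder
print(outputs.enc_outputs_class.shape)         # per-pixel binary objectness logits
print(outputs.enc_outputs_coord_logits.shape)  # proposal boxes before the sigmoid
```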
class DetaForObjectDetection(DetaPreTrainedModel):
    # When using clones, all layers > 0 will be clones, but layer 0 *is* required
    _tied_weights_keys = [r"bbox_embed\.\d+", r"class_embed\.\d+"]
    # We can't initialize the model on meta device as some weights are modified during the initialization
    _no_split_modules = None

    def __init__(self, config: DetaConfig):
        super().__init__(config)

        # Deformable DETR encoder-decoder model
        self.model = DetaModel(config)

        # Detection heads on top
        self.class_embed = nn.Linear(config.d_model, config.num_labels)
        self.bbox_embed = DetaMLPPredictionHead(
            input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
        )

        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)

        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers
        if config.with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.model.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.model.decoder.bbox_embed = None
        if config.two_stage:
            # hack implementation for two-stage
            self.model.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)

        # Initialize weights and apply final processing
        self.post_init()

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        aux_loss = [
            {"logits": logits, "pred_boxes": pred_boxes}
            for logits, pred_boxes in zip(outputs_class.transpose(0, 1)[:-1], outputs_coord.transpose(0, 1)[:-1])
        ]
        return aux_loss

    @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DetaObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.FloatTensor] = None,
        encoder_outputs: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[List[dict]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], DetaObjectDetectionOutput]:
        r"""
        labels (`List[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding
            boxes in the image,)` and the boxes a `torch.FloatTensor` of shape
            `(number of bounding boxes in the image, 4)`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, DetaForObjectDetection
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large")
        >>> model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[
        ...     0
        ... ]
        >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(
        ...         f"Detected {model.config.id2label[label.item()]} with confidence "
        ...         f"{round(score.item(), 3)} at location {box}"
        ...     )
        Detected cat with confidence 0.802 at location [9.87, 54.36, 316.93, 473.44]
        Detected cat with confidence 0.795 at location [346.62, 24.35, 639.62, 373.2]
        Detected remote with confidence 0.725 at location [40.41, 73.36, 175.77, 117.29]
        Detected remote with confidence 0.638 at location [333.34, 76.81, 370.22, 187.94]
        Detected couch with confidence 0.584 at location [0.03, 0.99, 640.02, 474.93]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # First, send images through DETR base model to obtain encoder + decoder outputs
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
        init_reference = outputs.init_reference_points if return_dict else outputs[0]
        inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]

        # class logits + predicted bounding boxes
        outputs_classes = []
        outputs_coords = []
        for level in range(hidden_states.shape[1]):
            if level == 0:
                reference = init_reference
            else:
                reference = inter_references[:, level - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[level](hidden_states[:, level])
            delta_bbox = self.bbox_embed[level](hidden_states[:, level])
            if reference.shape[-1] == 4:
                outputs_coord_logits = delta_bbox + reference
            elif reference.shape[-1] == 2:
                delta_bbox[..., :2] += reference
                outputs_coord_logits = delta_bbox
            else:
                raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}")
            outputs_coord = outputs_coord_logits.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        # Keep batch_size as first dimension
        outputs_class = torch.stack(outputs_classes, dim=1)
        outputs_coord = torch.stack(outputs_coords, dim=1)

        logits = outputs_class[:, -1]
        pred_boxes = outputs_coord[:, -1]

        loss, loss_dict, auxiliary_outputs = None, None, None
        if labels is not None:
            # First: create the matcher
            matcher = DetaHungarianMatcher(
                class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
            )
            # Second: create the criterion
            losses = ["labels", "boxes", "cardinality"]
            criterion = DetaLoss(
                matcher=matcher,
                num_classes=self.config.num_labels,
                focal_alpha=self.config.focal_alpha,
                losses=losses,
                num_queries=self.config.num_queries,
                assign_first_stage=self.config.assign_first_stage,
                assign_second_stage=self.config.assign_second_stage,
            )
            criterion.to(logits.device)
            # Third: compute the losses, based on outputs and labels
            outputs_loss = {}
            outputs_loss["logits"] = logits
            outputs_loss["pred_boxes"] = pred_boxes
            outputs_loss["init_reference"] = init_reference
            if self.config.auxiliary_loss:
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss["auxiliary_outputs"] = auxiliary_outputs
            if self.config.two_stage:
                enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
                outputs_loss["enc_outputs"] = {
                    "logits": outputs.enc_outputs_class,
                    "pred_boxes": enc_outputs_coord,
                    "anchors": outputs.output_proposals.sigmoid(),
                }

            loss_dict = criterion(outputs_loss, labels)
            # Fourth: compute total loss, as a weighted sum of the various losses
            weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
            weight_dict["loss_giou"] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
                aux_weight_dict.update({k + "_enc": v for k, v in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)

        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes) + auxiliary_outputs + outputs
            else:
                output = (logits, pred_boxes) + outputs
            tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output

            return tuple_outputs

        dict_outputs = DetaObjectDetectionOutput(
            loss=loss,
            loss_dict=loss_dict,
            logits=logits,
            pred_boxes=pred_boxes,
            auxiliary_outputs=auxiliary_outputs,
            last_hidden_state=outputs.last_hidden_state,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
            intermediate_hidden_states=outputs.intermediate_hidden_states,
            intermediate_reference_points=outputs.intermediate_reference_points,
            init_reference_points=outputs.init_reference_points,
            enc_outputs_class=outputs.enc_outputs_class,
            enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
            output_proposals=outputs.output_proposals,
        )

        return dict_outputs
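To complement the inference example in the docstring above, here is a minimal training-style sketch that passes `labels` so the matching-based losses assembled in the forward pass are actually computed. The label format follows the `labels` argument documentation (boxes in normalized `(center_x, center_y, width, height)` format); the single box and class index are purely illustrative.

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, DetaForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large")
model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

inputs = image_processor(images=image, return_tensors="pt")
# one dict per image: class indices plus boxes in normalized (cx, cy, w, h) format (illustrative values)
labels = [{"class_labels": torch.tensor([17]), "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.3]])}]

outputs = model(**inputs, labels=labels)
print(outputs.loss)       # weighted sum of the classification, L1 box and GIoU terms
print(outputs.loss_dict)  # the individual loss terms before weighting
```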
class DetaLoss(nn.Module):
    """
    This class computes the losses for `DetaForObjectDetection`. The process happens in two steps: 1) we compute
    hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched
    ground-truth / prediction (supervised class and box).

    Args:
        matcher (`DetaHungarianMatcher`):
            Module able to compute a matching between targets and proposals.
        num_classes (`int`):
            Number of object categories, omitting the special no-object category.
        focal_alpha (`float`):
            Alpha parameter in focal loss.
        losses (`List[str]`):
            List of all the losses to be applied. See `get_loss` for a list of all available losses.
    """

    def __init__(
        self,
        matcher,
        num_classes,
        focal_alpha,
        losses,
        num_queries,
        assign_first_stage=False,
        assign_second_stage=False,
    ):
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage

        if self.assign_first_stage:
            self.stg1_assigner = DetaStage1Assigner()
        if self.assign_second_stage:
            self.stg2_assigner = DetaStage2Assigner(num_queries)

    def loss_labels(self, outputs, targets, indices, num_boxes):
        """
        Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor
        of dim [nb_target_boxes]
        """
        if "logits" not in outputs:
            raise KeyError("No logits were found in the outputs")
        source_logits = outputs["logits"]

        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
        target_classes = torch.full(
            source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
        )
        target_classes[idx] = target_classes_o

        target_classes_onehot = torch.zeros(
            [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
            dtype=source_logits.dtype,
            layout=source_logits.layout,
            device=source_logits.device,
        )
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)

        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = (
            sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
            * source_logits.shape[1]
        )
        losses = {"loss_ce": loss_ce}

        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """
        Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.

        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
        """
        logits = outputs["logits"]
        device = logits.device
        target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {"cardinality_error": card_err}
        return losses

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.

        Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
        are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes" not in outputs:
            raise KeyError("No predicted boxes found in outputs")
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")

        losses = {}
        losses["loss_bbox"] = loss_bbox.sum() / num_boxes

        loss_giou = 1 - torch.diag(
            generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
        )
        losses["loss_giou"] = loss_giou.sum() / num_boxes
        return losses

    def _get_source_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return batch_idx, source_idx

    def _get_target_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return batch_idx, target_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        loss_map = {
            "labels": self.loss_labels,
            "cardinality": self.loss_cardinality,
            "boxes": self.loss_boxes,
        }
        if loss not in loss_map:
            raise ValueError(f"Loss {loss} not supported")
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        """
        This performs the loss computation.

        Args:
            outputs (`dict`, *optional*):
                Dictionary of tensors, see the output specification of the model for the format.
            targets (`List[dict]`, *optional*):
                List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
                losses applied, see each loss' doc.
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k not in ("auxiliary_outputs", "enc_outputs")}

        # Retrieve the matching between the outputs of the last layer and the targets
        if self.assign_second_stage:
            indices = self.stg2_assigner(outputs_without_aux, targets)
        else:
            indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
        num_boxes = sum(len(t["class_labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)

        # Check that we have initialized the distributed state
        world_size = 1
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                num_boxes = reduce(num_boxes)
                world_size = PartialState().num_processes
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if "auxiliary_outputs" in outputs:
            for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
                if not self.assign_second_stage:
                    indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)

        if "enc_outputs" in outputs:
            enc_outputs = outputs["enc_outputs"]
            bin_targets = copy.deepcopy(targets)
            for bt in bin_targets:
                bt["class_labels"] = torch.zeros_like(bt["class_labels"])
            if self.assign_first_stage:
                indices = self.stg1_assigner(enc_outputs, bin_targets)
            else:
                indices = self.matcher(enc_outputs, bin_targets)
            for loss in self.losses:
                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
                l_dict = {k + "_enc": v for k, v in l_dict.items()}
                losses.update(l_dict)

        return losses


class DetaMLPPredictionHead(nn.Module):
    """
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
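A quick sketch of the prediction head in isolation, using the same dimensions the detection model wires in above (`input_dim = hidden_dim = config.d_model = 256`, four box coordinates, three layers); the batch and query sizes are arbitrary.

```python
import torch

# Assumes DetaMLPPredictionHead, as defined above, is in scope
head = DetaMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
decoder_features = torch.randn(2, 900, 256)  # (batch_size, num_queries, d_model)
box_deltas = head(decoder_features)
print(box_deltas.shape)                      # torch.Size([2, 900, 4]) -- box logits, sigmoided downstream
```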
class DetaHungarianMatcher(nn.Module):
    """
    This class computes an assignment between the targets and the predictions of the network.

    For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
    predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
    un-matched (and thus treated as non-objects).

    Args:
        class_cost:
            The relative weight of the classification error in the matching cost.
        bbox_cost:
            The relative weight of the L1 error of the bounding box coordinates in the matching cost.
        giou_cost:
            The relative weight of the giou loss of the bounding box in the matching cost.
    """

    def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
        super().__init__()
        requires_backends(self, ["scipy"])

        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        """
        Args:
            outputs (`dict`):
                A dictionary that contains at least these entries:
                * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
            targets (`List[dict]`):
                A list of targets (len(targets) = batch_size), where each target is a dict containing:
                * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
                  ground-truth objects in the target) containing the class labels
                * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.

        Returns:
            `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
            - index_i is the indices of the selected predictions (in order)
            - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        batch_size, num_queries = outputs["logits"].shape[:2]

        # We flatten to compute the cost matrices in a batch
        out_prob = outputs["logits"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

        # Also concat the target labels and boxes
        target_ids = torch.cat([v["class_labels"] for v in targets])
        target_bbox = torch.cat([v["boxes"] for v in targets])

        # Compute the classification cost.
        alpha = 0.25
        gamma = 2.0
        neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
        pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]

        # Compute the L1 cost between boxes
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)

        # Compute the giou cost between boxes
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))

        # Final cost matrix
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()

        sizes = [len(v["boxes"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
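The last step above hands each per-image block of the cost matrix to SciPy's Hungarian solver. A small self-contained example of what `linear_sum_assignment` returns for a toy cost matrix of 3 queries by 2 targets (the numbers are invented):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# rows = predictions (queries), columns = ground-truth boxes; lower cost = better match
cost = np.array(
    [
        [0.9, 0.1],
        [0.4, 0.8],
        [0.2, 0.7],
    ]
)
row_ind, col_ind = linear_sum_assignment(cost)
print(row_ind, col_ind)              # [0 2] [1 0]: query 0 <-> target 1, query 2 <-> target 0
print(cost[row_ind, col_ind].sum())  # 0.1 + 0.2, the minimal total matching cost
```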
class DetaMatcher:
    """
    This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will
    have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements.

    The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth,
    prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box
    intersection-over-union overlap values.

    The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that
    matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction.
    """
    def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False):
        """
        Args:
            thresholds (`list[float]`):
                A list of thresholds used to stratify predictions into levels.
            labels (`list[int]`):
                A list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1}
                signifying {ignore, negative class, positive class}, respectively.
            allow_low_quality_matches (`bool`, *optional*, defaults to `False`):
                If `True`, produce additional matches for predictions with maximum match quality lower than
                high_threshold. See `set_low_quality_matches_` for more details.
            For example,
                thresholds = [0.3, 0.5]
                labels = [0, -1, 1]
                All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives
                while training.
                All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored.
                All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives.
        """
        # Add -inf and +inf to first and last position in thresholds
        thresholds = thresholds[:]
        if thresholds[0] < 0:
            raise ValueError("Thresholds should be positive")
        thresholds.insert(0, -float("inf"))
        thresholds.append(float("inf"))

        # Currently torchscript does not support all + generator
        if not all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])):
            raise ValueError("Thresholds should be sorted.")
        if not all(l in [-1, 0, 1] for l in labels):
            raise ValueError("All labels should be either -1, 0 or 1")
        if len(labels) != len(thresholds) - 1:
            raise ValueError("Number of labels should be equal to number of thresholds - 1")
        self.thresholds = thresholds
        self.labels = labels
        self.allow_low_quality_matches = allow_low_quality_matches
    def __call__(self, match_quality_matrix):
        """
        Args:
            match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M
                ground-truth elements and N predicted elements. All elements must be >= 0 (due to the use of
                `torch.nonzero` for selecting indices in `set_low_quality_matches_`).
        Returns:
            matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M)
            match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates whether a prediction is
                a true or false positive or ignored
        """
        assert match_quality_matrix.dim() == 2
        if match_quality_matrix.numel() == 0:
            default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
            # When no gt boxes exist, we define IOU = 0 and therefore set labels
            # to `self.labels[0]`, which usually defaults to background class 0
            # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds
            default_match_labels = match_quality_matrix.new_full(
                (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
            )
            return default_matches, default_match_labels

        assert torch.all(match_quality_matrix >= 0)

        # match_quality_matrix is M (gt) x N (predicted)
        # Max over gt elements (dim 0) to find best gt candidate for each prediction
        matched_vals, matches = match_quality_matrix.max(dim=0)

        match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)

        for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
            low_high = (matched_vals >= low) & (matched_vals < high)
            match_labels[low_high] = l

        if self.allow_low_quality_matches:
            self.set_low_quality_matches_(match_labels, match_quality_matrix)

        return matches, match_labels

    def set_low_quality_matches_(self, match_labels, match_quality_matrix):
        """
        Produce additional matches for predictions that have only low-quality matches. Specifically, for each
        ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each
        prediction in that set, if it is unmatched, then match it to the ground-truth G.
        This function implements the RPN assignment case (i) in Sec. 3.1.2 of :paper:`Faster R-CNN`.
        """
        # For each gt, find the prediction with which it has highest quality
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
        # Find the highest quality match available, even if it is low, including ties.
        # Note that the matches qualities must be positive due to the use of `torch.nonzero`.
        _, pred_inds_with_highest_quality = nonzero_tuple(match_quality_matrix == highest_quality_foreach_gt[:, None])
        # If an anchor was labeled positive only due to a low-quality match
        # with gt_A, but it has larger overlap with gt_B, its matched index will still be gt_B.
        # This follows the implementation in Detectron, and is found to have no significant impact.
        match_labels[pred_inds_with_highest_quality] = 1
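To make the threshold/label scheme documented in `__init__` concrete, here is a small sketch that runs the matcher defined above on a toy IoU matrix of 2 ground-truth boxes by 4 predictions (the IoU values are invented):

```python
import torch

# Assumes DetaMatcher, as defined above, is in scope
matcher = DetaMatcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=False)

# rows = 2 ground-truth boxes, columns = 4 predictions (pairwise IoU)
iou = torch.tensor(
    [
        [0.10, 0.40, 0.80, 0.05],
        [0.20, 0.10, 0.30, 0.55],
    ]
)
matches, match_labels = matcher(iou)
print(matches)       # tensor([1, 0, 0, 1]) -- best ground-truth index for each prediction
print(match_labels)  # tensor([ 0, -1,  1,  1], dtype=torch.int8) -- 0: negative, -1: ignored, 1: positive
```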
class DetaStage2Assigner(nn.Module):
    def __init__(self, num_queries, max_k=4):
        super().__init__()
        self.positive_fraction = 0.25
        self.bg_label = 400  # number > 91 to filter out later
        self.batch_size_per_image = num_queries
        self.proposal_matcher = DetaMatcher(thresholds=[0.6], labels=[0, 1], allow_low_quality_matches=True)
        self.k = max_k

    def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor):
        """
        Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification
        labels.

        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched gt index in [0, M) for each
                proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label (one of cfg.MODEL.ROI_HEADS.IOU_LABELS)
                for each proposal.
            gt_classes (Tensor): a vector of length M.

        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for each sampled proposal. Each sample is
                labeled as either a category in [0, num_classes) or the background (num_classes).
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.bg_label
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.bg_label

        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label
        )

        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs]

    def forward(self, outputs, targets, return_cost_matrix=False):
        # COCO categories are from 1 to 90. They set num_classes=91 and apply sigmoid.

        bs = len(targets)
        indices = []
        ious = []
        for b in range(bs):
            iou, _ = box_iou(
                center_to_corners_format(targets[b]["boxes"]),
                center_to_corners_format(outputs["init_reference"][b].detach()),
            )
            matched_idxs, matched_labels = self.proposal_matcher(
                iou
            )  # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.6, 0 ow]
            (
                sampled_idxs,
                sampled_gt_classes,
            ) = self._sample_proposals(  # list of sampled proposal_ids, sampled_id -> [0, num_classes)+[bg_label]
                matched_idxs, matched_labels, targets[b]["class_labels"]
            )
            pos_pr_inds = sampled_idxs[sampled_gt_classes != self.bg_label]
            pos_gt_inds = matched_idxs[pos_pr_inds]
            pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou)
            indices.append((pos_pr_inds, pos_gt_inds))
            ious.append(iou)
        if return_cost_matrix:
            return indices, ious
        return indices

    def postprocess_indices(self, pr_inds, gt_inds, iou):
        return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)


class DetaStage1Assigner(nn.Module):
    def __init__(self, t_low=0.3, t_high=0.7, max_k=4):
        super().__init__()
        self.positive_fraction = 0.5
        self.batch_size_per_image = 256
        self.k = max_k
        self.t_low = t_low
        self.t_high = t_high
        self.anchor_matcher = DetaMatcher(
            thresholds=[t_low, t_high], labels=[0, -1, 1], allow_low_quality_matches=True
        )

    def _subsample_labels(self, label):
        """
        Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value
        (-1) for all elements that are not included in the sample.

        Args:
            labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
        """
        pos_idx, neg_idx = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0)
        # Fill with the ignore label (-1), then set positive and negative labels
        label.fill_(-1)
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label

    def forward(self, outputs, targets):
        bs = len(targets)
        indices = []
        for b in range(bs):
            anchors = outputs["anchors"][b]
            if len(targets[b]["boxes"]) == 0:
                indices.append(
                    (
                        torch.tensor([], dtype=torch.long, device=anchors.device),
                        torch.tensor([], dtype=torch.long, device=anchors.device),
                    )
                )
                continue
            iou, _ = box_iou(
                center_to_corners_format(targets[b]["boxes"]),
                center_to_corners_format(anchors),
            )
            matched_idxs, matched_labels = self.anchor_matcher(
                iou
            )  # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.7, 0 if iou < 0.3, -1 ow]
            matched_labels = self._subsample_labels(matched_labels)

            all_pr_inds = torch.arange(len(anchors), device=matched_labels.device)
            pos_pr_inds = all_pr_inds[matched_labels == 1]
            pos_gt_inds = matched_idxs[pos_pr_inds]
            pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou)
            pos_pr_inds, pos_gt_inds = pos_pr_inds.to(anchors.device), pos_gt_inds.to(anchors.device)
            indices.append((pos_pr_inds, pos_gt_inds))
        return indices

    def postprocess_indices(self, pr_inds, gt_inds, iou):
        return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)
# From src/transformers/models/deprecated/deta/configuration_deta.py
class DetaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the DETA
    [jozhang97/deta-swin-large](https://huggingface.co/jozhang97/deta-swin-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the
            transformers library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
            detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        decoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        class_cost (`float`, *optional*, defaults to 1):
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 5):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are generated by a variant of
            DETA and are further fed into the decoder for iterative bounding box refinement.
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        assign_first_stage (`bool`, *optional*, defaults to `True`):
            Whether to assign each prediction to the highest-overlapping ground truth object if the overlap is
            larger than a threshold of 0.7.
        assign_second_stage (`bool`, *optional*, defaults to `True`):
            Whether to use a second-stage assignment procedure that closely follows the first-stage assignment
            procedure.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
            Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
            kernels are not supported by PyTorch ONNX export.
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
    Examples:

    ```python
    >>> from transformers import DetaConfig, DetaModel

    >>> # Initializing a DETA SenseTime/deformable-detr style configuration
    >>> configuration = DetaConfig()

    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DetaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
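As a complement to the docstring example above, the sketch below constructs the config with a few of the non-default options documented in the argument list. The values are illustrative rather than recommendations, and it assumes `DetaConfig` is exported by the installed transformers version.

```python
from transformers import DetaConfig

config = DetaConfig(
    num_queries=900,
    two_stage=True,
    two_stage_num_proposals=300,
    assign_first_stage=True,
    assign_second_stage=True,
    disable_custom_kernels=True,  # per the docstring, needed for ONNX export
)
print(config.num_feature_levels)  # 5
```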
    def __init__(
        self,
        backbone_config=None,
        backbone=None,
        use_pretrained_backbone=False,
        use_timm_backbone=False,
        backbone_kwargs=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        assign_second_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=True,
        **kwargs,
    ):
        if use_pretrained_backbone:
            raise ValueError("Pretrained backbones are not supported yet.")
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        if backbone_config is not None and backbone is not None:
            raise ValueError("You can't specify both `backbone` and `backbone_config`.")

        if backbone_config is None and backbone is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
            raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
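A short usage sketch of the backbone resolution logic above. It assumes `DetaConfig` and `ResNetConfig` are importable from the installed transformers version; a dict with a `model_type` key goes through the same `CONFIG_MAPPING` lookup shown in the code.

```python
from transformers import DetaConfig, ResNetConfig

# Default: neither `backbone` nor `backbone_config` given -> a default ResNet backbone config is created
config = DetaConfig()

# Explicit backbone config object
backbone_config = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
config = DetaConfig(backbone_config=backbone_config)

# A dict form is resolved via CONFIG_MAPPING["resnet"] and ResNetConfig.from_dict
config = DetaConfig(backbone_config={"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]})

# Passing both `backbone` and `backbone_config` raises a ValueError, as the check above shows
```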
        self.backbone_config = backbone_config
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = use_timm_backbone
        self.backbone_kwargs = backbone_kwargs
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
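Given the consistency check in the snippet above, the following combination is rejected at construction time (a small demonstration, assuming `DetaConfig` is importable):

```python
from transformers import DetaConfig

try:
    DetaConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # If two_stage is True, with_box_refine must be True.
```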
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
10,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/deta/configuration_deta.py
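The two properties above, together with the `attribute_map` defined earlier, expose the usual transformers-wide aliases. A quick check, again assuming `DetaConfig` is importable:

```python
from transformers import DetaConfig

config = DetaConfig(d_model=256, encoder_attention_heads=8)
print(config.hidden_size)          # 256
print(config.num_attention_heads)  # 8
```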