hf_public_repos/pytorch-image-models/timm/layers/pool2d_same.py
""" AvgPool2d w/ Same Padding Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Tuple, Optional from .helpers import to_2tuple from .padding import pad_same, get_padding_value def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), ceil_mode: bool = False, count_include_pad: bool = True): # FIXME how to deal with count_include_pad vs not for external padding? x = pad_same(x, kernel_size, stride) return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) class AvgPool2dSame(nn.AvgPool2d): """ Tensorflow like 'SAME' wrapper for 2D average pooling """ def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride) return F.avg_pool2d( x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) def max_pool2d_same( x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), dilation: List[int] = (1, 1), ceil_mode: bool = False): x = pad_same(x, kernel_size, stride, value=-float('inf')) return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) class MaxPool2dSame(nn.MaxPool2d): """ Tensorflow like 'SAME' wrapper for 2D max pooling """ def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): stride = stride or kernel_size padding = kwargs.pop('padding', '') padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) if is_dynamic: if pool_type == 'avg': return AvgPool2dSame(kernel_size, stride=stride, **kwargs) elif pool_type == 'max': return MaxPool2dSame(kernel_size, stride=stride, **kwargs) else: assert False, f'Unsupported pool type {pool_type}' else: if pool_type == 'avg': return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) elif pool_type == 'max': return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) else: assert False, f'Unsupported pool type {pool_type}'
hf_public_repos/pytorch-image-models/timm/layers/activations.py
""" Activations A collection of activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F def swish(x, inplace: bool = False): """Swish - Described in: https://arxiv.org/abs/1710.05941 """ return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) class Swish(nn.Module): def __init__(self, inplace: bool = False): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): return swish(x, self.inplace) def mish(x, inplace: bool = False): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 NOTE: I don't have a working inplace variant """ return x.mul(F.softplus(x).tanh()) class Mish(nn.Module): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 """ def __init__(self, inplace: bool = False): super(Mish, self).__init__() def forward(self, x): return mish(x) def sigmoid(x, inplace: bool = False): return x.sigmoid_() if inplace else x.sigmoid() # PyTorch has this, but not with a consistent inplace argmument interface class Sigmoid(nn.Module): def __init__(self, inplace: bool = False): super(Sigmoid, self).__init__() self.inplace = inplace def forward(self, x): return x.sigmoid_() if self.inplace else x.sigmoid() def tanh(x, inplace: bool = False): return x.tanh_() if inplace else x.tanh() # PyTorch has this, but not with a consistent inplace argmument interface class Tanh(nn.Module): def __init__(self, inplace: bool = False): super(Tanh, self).__init__() self.inplace = inplace def forward(self, x): return x.tanh_() if self.inplace else x.tanh() def hard_swish(x, inplace: bool = False): inner = F.relu6(x + 3.).div_(6.) return x.mul_(inner) if inplace else x.mul(inner) class HardSwish(nn.Module): def __init__(self, inplace: bool = False): super(HardSwish, self).__init__() self.inplace = inplace def forward(self, x): return hard_swish(x, self.inplace) def hard_sigmoid(x, inplace: bool = False): if inplace: return x.add_(3.).clamp_(0., 6.).div_(6.) else: return F.relu6(x + 3.) / 6. 
class HardSigmoid(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoid, self).__init__() self.inplace = inplace def forward(self, x): return hard_sigmoid(x, self.inplace) def hard_mish(x, inplace: bool = False): """ Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ if inplace: return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) else: return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMish(nn.Module): def __init__(self, inplace: bool = False): super(HardMish, self).__init__() self.inplace = inplace def forward(self, x): return hard_mish(x, self.inplace) class PReLU(nn.PReLU): """Applies PReLU (w/ dummy inplace arg) """ def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: super(PReLU, self).__init__(num_parameters=num_parameters, init=init) def forward(self, input: torch.Tensor) -> torch.Tensor: return F.prelu(input, self.weight) def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: return F.gelu(x) class GELU(nn.Module): """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) """ def __init__(self, inplace: bool = False): super(GELU, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input) def gelu_tanh(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: return F.gelu(x, approximate='tanh') class GELUTanh(nn.Module): """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) """ def __init__(self, inplace: bool = False): super(GELUTanh, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input, approximate='tanh') def quick_gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: return x * torch.sigmoid(1.702 * x) class QuickGELU(nn.Module): """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) """ def __init__(self, inplace: bool = False): super(QuickGELU, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return quick_gelu(input)
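A short sketch of the common interface this file provides (not part of the original file; the timm.layers.activations import path assumes an installed timm). Modules share one constructor signature so they can be swapped freely, and the functional forms mirror torch's inplace convention:

import torch
from timm.layers.activations import HardSwish, Swish, hard_swish

x = torch.randn(4, 8)
# all modules take an `inplace` arg, even where unused, so they are interchangeable
for act in (Swish(), HardSwish(inplace=False)):
    print(type(act).__name__, act(x).shape)

# inplace=True mutates the input tensor, matching the out-of-place result
y = x.clone()
hard_swish(y, inplace=True)
assert torch.allclose(y, hard_swish(x))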
hf_public_repos/pytorch-image-models/timm/layers/activations_me.py
""" Activations (memory-efficient w/ custom autograd) A collection of activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. These activations are not compatible with jit scripting or ONNX export of the model, please use either the JIT or basic versions of the activations. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit_fwd(x): return x.mul(torch.sigmoid(x)) @torch.jit.script def swish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishJitAutoFn(torch.autograd.Function): """ torch.jit.script optimised Swish w/ memory-efficient checkpoint Inspired by conversation btw Jeremy Howard & Adam Pazske https://twitter.com/jeremyphoward/status/1188251041835315200 """ @staticmethod def symbolic(g, x): return g.op("Mul", x, g.op("Sigmoid", x)) @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_jit_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishJitAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool = False): super(SwishMe, self).__init__() def forward(self, x): return SwishJitAutoFn.apply(x) @torch.jit.script def mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x))) @torch.jit.script def mish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishJitAutoFn(torch.autograd.Function): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 A memory efficient, jit scripted variant of Mish """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_jit_bwd(x, grad_output) def mish_me(x, inplace=False): return MishJitAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool = False): super(MishMe, self).__init__() def forward(self, x): return MishJitAutoFn.apply(x) @torch.jit.script def hard_sigmoid_jit_fwd(x, inplace: bool = False): return (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_sigmoid_jit_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. return grad_output * m class HardSigmoidJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_jit_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool = False): return HardSigmoidJitAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidJitAutoFn.apply(x) @torch.jit.script def hard_swish_jit_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_swish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.) m = torch.where((x >= -3.) & (x <= 3.), x / 3. 
+ .5, m) return grad_output * m class HardSwishJitAutoFn(torch.autograd.Function): """A memory efficient, jit-scripted HardSwish activation""" @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_jit_bwd(x, grad_output) @staticmethod def symbolic(g, self): input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) return g.op("Mul", self, hardtanh_) def hard_swish_me(x, inplace=False): return HardSwishJitAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishJitAutoFn.apply(x) @torch.jit.script def hard_mish_jit_fwd(x): return 0.5 * x * (x + 2).clamp(min=0, max=2) @torch.jit.script def hard_mish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= -2.) m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) return grad_output * m class HardMishJitAutoFn(torch.autograd.Function): """ A memory efficient, jit scripted variant of Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_mish_jit_bwd(x, grad_output) def hard_mish_me(x, inplace: bool = False): return HardMishJitAutoFn.apply(x) class HardMishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardMishMe, self).__init__() def forward(self, x): return HardMishJitAutoFn.apply(x)
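A quick sanity-check sketch (not part of the original file) that the hand-written backward in SwishJitAutoFn matches what autograd derives from the reference formula, assuming timm is installed so the module imports as timm.layers.activations_me:

import torch
from timm.layers.activations_me import swish_me

# gradient through the custom autograd Function
x = torch.randn(16, dtype=torch.double, requires_grad=True)
g_me, = torch.autograd.grad(swish_me(x).sum(), x)

# gradient autograd derives from the plain x * sigmoid(x) expression
x_ref = x.detach().clone().requires_grad_(True)
g_ref, = torch.autograd.grad((x_ref * torch.sigmoid(x_ref)).sum(), x_ref)
assert torch.allclose(g_me, g_ref)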
hf_public_repos/pytorch-image-models/timm/layers/attention_pool2d.py
""" Attention Pool 2D Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. Based on idea in CLIP by OpenAI, licensed Apache 2.0 https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py Hacked together by / Copyright 2021 Ross Wightman """ from typing import Union, Tuple import torch import torch.nn as nn from .helpers import to_2tuple from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding from .weight_init import trunc_normal_ class RotAttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW """ def __init__( self, in_features: int, out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads assert embed_dim % num_heads == 0 self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 self.pos_embed = RotaryEmbedding(self.head_dim) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] qc, q = q[:, :, :1], q[:, :, 1:] sin_emb, cos_emb = self.pos_embed.get_embed((H, W)) q = apply_rot_embed(q, sin_emb, cos_emb) q = torch.cat([qc, q], dim=2) kc, k = k[:, :, :1], k[:, :, 1:] k = apply_rot_embed(k, sin_emb, cos_emb) k = torch.cat([kc, k], dim=2) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0] class AttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. It was based on impl in CLIP by OpenAI https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. 
""" def __init__( self, in_features: int, feat_size: Union[int, Tuple[int, int]], out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features assert embed_dim % num_heads == 0 self.feat_size = to_2tuple(feat_size) self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 spatial_dim = self.feat_size[0] * self.feat_size[1] self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) trunc_normal_(self.pos_embed, std=in_features ** -0.5) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W assert self.feat_size[0] == H assert self.feat_size[1] == W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = x + self.pos_embed.unsqueeze(0).to(x.dtype) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0]
hf_public_repos/pytorch-image-models/timm/layers/typing.py
from typing import Callable, Tuple, Type, Union

import torch

LayerType = Union[str, Callable, Type[torch.nn.Module]]
PadType = Union[str, int, Tuple[int, int]]
hf_public_repos/pytorch-image-models/timm/layers/config.py
""" Model / Layer Config singleton state """ import os import warnings from typing import Any, Optional import torch __all__ = [ 'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn', 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn' ] # Set to True if prefer to have layers with no jit optimization (includes activations) _NO_JIT = False # Set to True if prefer to have activation layers with no jit optimization # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying # the jit flags so far are activations. This will change as more layers are updated and/or added. _NO_ACTIVATION_JIT = False # Set to True if exporting a model with Same padding via ONNX _EXPORTABLE = False # Set to True if wanting to use torch.jit.script on a model _SCRIPTABLE = False # use torch.scaled_dot_product_attention where possible _HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention') if 'TIMM_FUSED_ATTN' in os.environ: _USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN']) else: _USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use) def is_no_jit(): return _NO_JIT class set_no_jit: def __init__(self, mode: bool) -> None: global _NO_JIT self.prev = _NO_JIT _NO_JIT = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _NO_JIT _NO_JIT = self.prev return False def is_exportable(): return _EXPORTABLE class set_exportable: def __init__(self, mode: bool) -> None: global _EXPORTABLE self.prev = _EXPORTABLE _EXPORTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _EXPORTABLE _EXPORTABLE = self.prev return False def is_scriptable(): return _SCRIPTABLE class set_scriptable: def __init__(self, mode: bool) -> None: global _SCRIPTABLE self.prev = _SCRIPTABLE _SCRIPTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE _SCRIPTABLE = self.prev return False class set_layer_config: """ Layer config context manager that allows setting all layer config flags at once. If a flag arg is None, it will not change the current value. """ def __init__( self, scriptable: Optional[bool] = None, exportable: Optional[bool] = None, no_jit: Optional[bool] = None, no_activation_jit: Optional[bool] = None): global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT if scriptable is not None: _SCRIPTABLE = scriptable if exportable is not None: _EXPORTABLE = exportable if no_jit is not None: _NO_JIT = no_jit if no_activation_jit is not None: _NO_ACTIVATION_JIT = no_activation_jit def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev return False def use_fused_attn(experimental: bool = False) -> bool: # NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0 if not _HAS_FUSED_ATTN or _EXPORTABLE: return False if experimental: return _USE_FUSED_ATTN > 1 return _USE_FUSED_ATTN > 0 def set_fused_attn(enable: bool = True, experimental: bool = False): global _USE_FUSED_ATTN if not _HAS_FUSED_ATTN: warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.') return if experimental and enable: _USE_FUSED_ATTN = 2 elif enable: _USE_FUSED_ATTN = 1 else: _USE_FUSED_ATTN = 0
hf_public_repos/pytorch-image-models/timm/layers/create_attn.py
""" Attention Factory Hacked together by / Copyright 2021 Ross Wightman """ import torch from functools import partial from .bottleneck_attn import BottleneckAttn from .cbam import CbamModule, LightCbamModule from .eca import EcaModule, CecaModule from .gather_excite import GatherExcite from .global_context import GlobalContext from .halo_attn import HaloAttn from .lambda_layer import LambdaLayer from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .selective_kernel import SelectiveKernel from .split_attn import SplitAttn from .squeeze_excite import SEModule, EffectiveSEModule def get_attn(attn_type): if isinstance(attn_type, torch.nn.Module): return attn_type module_cls = None if attn_type: if isinstance(attn_type, str): attn_type = attn_type.lower() # Lightweight attention modules (channel and/or coarse spatial). # Typically added to existing network architecture blocks in addition to existing convolutions. if attn_type == 'se': module_cls = SEModule elif attn_type == 'ese': module_cls = EffectiveSEModule elif attn_type == 'eca': module_cls = EcaModule elif attn_type == 'ecam': module_cls = partial(EcaModule, use_mlp=True) elif attn_type == 'ceca': module_cls = CecaModule elif attn_type == 'ge': module_cls = GatherExcite elif attn_type == 'gc': module_cls = GlobalContext elif attn_type == 'gca': module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) elif attn_type == 'cbam': module_cls = CbamModule elif attn_type == 'lcbam': module_cls = LightCbamModule # Attention / attention-like modules w/ significant params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'sk': module_cls = SelectiveKernel elif attn_type == 'splat': module_cls = SplitAttn # Self-attention / attention-like modules w/ significant compute and/or params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'lambda': return LambdaLayer elif attn_type == 'bottleneck': return BottleneckAttn elif attn_type == 'halo': return HaloAttn elif attn_type == 'nl': module_cls = NonLocalAttn elif attn_type == 'bat': module_cls = BatNonLocalAttn # Woops! else: assert False, "Invalid attn module (%s)" % attn_type elif isinstance(attn_type, bool): if attn_type: module_cls = SEModule else: module_cls = attn_type return module_cls def create_attn(attn_type, channels, **kwargs): module_cls = get_attn(attn_type) if module_cls is not None: # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels return module_cls(channels, **kwargs) return None
hf_public_repos/pytorch-image-models/timm/layers/conv_bn_act.py
""" Conv2d + BN + Act Hacked together by / Copyright 2020 Ross Wightman """ import functools from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class ConvNormAct(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=nn.ReLU, act_kwargs=None, drop_layer=None, ): super(ConvNormAct, self).__init__() norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) # NOTE for backwards compatibility with models that use separate norm and act layer definitions norm_act_layer = get_norm_act_layer(norm_layer, act_layer) # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer( out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs, ) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) return x ConvBnAct = ConvNormAct def create_aa(aa_layer, channels, stride=2, enable=True): if not aa_layer or not enable: return nn.Identity() if isinstance(aa_layer, functools.partial): if issubclass(aa_layer.func, nn.AvgPool2d): return aa_layer() else: return aa_layer(channels) elif issubclass(aa_layer, nn.AvgPool2d): return aa_layer(stride) else: return aa_layer(channels=channels, stride=stride) class ConvNormActAa(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=nn.ReLU, act_kwargs=None, aa_layer=None, drop_layer=None, ): super(ConvNormActAa, self).__init__() use_aa = aa_layer is not None and stride == 2 norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, padding=padding, dilation=dilation, groups=groups, bias=bias) # NOTE for backwards compatibility with models that use separate norm and act layer definitions norm_act_layer = get_norm_act_layer(norm_layer, act_layer) # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer(out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs) self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.aa(x) return x
hf_public_repos/pytorch-image-models/timm/layers/grn.py
""" Global Response Normalization Module Based on the GRN layer presented in `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 This implementation * works for both NCHW and NHWC tensor layouts * uses affine param names matching existing torch norm layers * slightly improves eager mode performance via fused addcmul Hacked together by / Copyright 2023 Ross Wightman """ import torch from torch import nn as nn class GlobalResponseNorm(nn.Module): """ Global Response Normalization layer """ def __init__(self, dim, eps=1e-6, channels_last=True): super().__init__() self.eps = eps if channels_last: self.spatial_dim = (1, 2) self.channel_dim = -1 self.wb_shape = (1, 1, 1, -1) else: self.spatial_dim = (2, 3) self.channel_dim = 1 self.wb_shape = (1, -1, 1, 1) self.weight = nn.Parameter(torch.zeros(dim)) self.bias = nn.Parameter(torch.zeros(dim)) def forward(self, x): x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
hf_public_repos/pytorch-image-models/timm/layers/split_batchnorm.py
""" Split BatchNorm A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through a separate BN layer. The first split is passed through the parent BN layers with weight/bias keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' namespace. This allows easily removing the auxiliary BN layers after training to efficiently achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, 'Disentangled Learning via An Auxiliary BN' Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([ nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: # aux BN only relevant while training split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" split_input = input.split(split_size) x = [super().forward(split_input[0])] for i, a in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): """ Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. Args: module (torch.nn.Module): input module num_splits: number of separate batchnorm layers to split input across Example:: >>> # model is an instance of torch.nn.Module >>> model = timm.models.convert_splitbn_model(model, num_splits=2) """ mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod
hf_public_repos/pytorch-image-models/timm/layers/test_time_pool.py
""" Test Time Pooling (Average-Max Pool) Hacked together by / Copyright 2020 Ross Wightman """ import logging from torch import nn import torch.nn.functional as F from .adaptive_avgmax_pool import adaptive_avgmax_pool2d _logger = logging.getLogger(__name__) class TestTimePoolHead(nn.Module): def __init__(self, base, original_pool=7): super(TestTimePoolHead, self).__init__() self.base = base self.original_pool = original_pool base_fc = self.base.get_classifier() if isinstance(base_fc, nn.Conv2d): self.fc = base_fc else: self.fc = nn.Conv2d( self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) self.base.reset_classifier(0) # delete original fc layer def forward(self, x): x = self.base.forward_features(x) x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) x = self.fc(x) x = adaptive_avgmax_pool2d(x, 1) return x.view(x.size(0), -1) def apply_test_time_pool(model, config, use_test_size=False): test_time_pool = False if not hasattr(model, 'default_cfg') or not model.default_cfg: return model, False if use_test_size and 'test_input_size' in model.default_cfg: df_input_size = model.default_cfg['test_input_size'] else: df_input_size = model.default_cfg['input_size'] if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: _logger.info('Target input size %s > pretrained default %s, using test time pooling' % (str(config['input_size'][-2:]), str(df_input_size[-2:]))) model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) test_time_pool = True return model, test_time_pool
hf_public_repos/pytorch-image-models/timm/layers/lambda_layer.py
""" Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 @misc{2102.08602, Author = {Irwan Bello}, Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, Year = {2021}, } Status: This impl is a WIP. Code snippets in the paper were used as reference but good chance some details are missing/wrong. I've only implemented local lambda conv based pos embeddings. For a PyTorch impl that includes other embedding options checkout https://github.com/lucidrains/lambda-networks Hacked together by / Copyright 2021 Ross Wightman """ import torch from torch import nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ def rel_pos_indices(size): size = to_2tuple(size) pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) rel_pos = pos[:, None, :] - pos[:, :, None] rel_pos[0] += size[0] - 1 rel_pos[1] += size[1] - 1 return rel_pos # 2, H * W, H * W class LambdaLayer(nn.Module): """Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. The internal dimensions of the lambda module are controlled via the interaction of several arguments. * the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query (q) and key (k) dimension are determined by * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None * q = num_heads * dim_head, k = dim_head * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W stride (int): output stride of the module, avg pool used if stride == 2 num_heads (int): parallel attention heads. dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) qkv_bias (bool): add bias to q, k, and v projections """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, qk_ratio=1.0, qkv_bias=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0, ' should be divided by num_heads' self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.num_heads = num_heads self.dim_v = dim_out // num_heads self.qkv = nn.Conv2d( dim, num_heads * self.dim_qk + self.dim_qk + self.dim_v, kernel_size=1, bias=qkv_bias) self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) self.norm_v = nn.BatchNorm2d(self.dim_v) if r is not None: # local lambda convolution for pos self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) self.pos_emb = None self.rel_pos_indices = None else: # relative pos embedding assert feat_size is not None feat_size = to_2tuple(feat_size) rel_size = [2 * s - 1 for s in feat_size] self.conv_lambda = None self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in if self.conv_lambda is not None: trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) if self.pos_emb is not None: trunc_normal_(self.pos_emb, std=.02) def forward(self, x): B, C, H, W = x.shape M = H * W qkv = self.qkv(x) q, k, v = torch.split(qkv, [ self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M content_lam = k @ v # B, K, V content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V if self.pos_emb is None: position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V else: # FIXME relative pos embedding path not fully verified pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W out = self.pool(out) return out
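A minimal usage sketch (not part of the original file; import assumes an installed timm exporting LambdaLayer via timm.layers). With the default local lambda conv (r=9) no feat_size is needed and the layer is a shape-preserving drop-in for a conv block:

import torch
from timm.layers import LambdaLayer

layer = LambdaLayer(dim=128, num_heads=4, dim_head=16, r=9)
x = torch.randn(2, 128, 16, 16)
print(layer(x).shape)  # torch.Size([2, 128, 16, 16])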
hf_public_repos/pytorch-image-models/timm/layers/activations_jit.py
""" Activations A collection of jit-scripted activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted versions if they contain in-place ops. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit(x, inplace: bool = False): """Swish - Described in: https://arxiv.org/abs/1710.05941 """ return x.mul(x.sigmoid()) @torch.jit.script def mish_jit(x, _inplace: bool = False): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 """ return x.mul(F.softplus(x).tanh()) class SwishJit(nn.Module): def __init__(self, inplace: bool = False): super(SwishJit, self).__init__() def forward(self, x): return swish_jit(x) class MishJit(nn.Module): def __init__(self, inplace: bool = False): super(MishJit, self).__init__() def forward(self, x): return mish_jit(x) @torch.jit.script def hard_sigmoid_jit(x, inplace: bool = False): # return F.relu6(x + 3.) / 6. return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSigmoidJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidJit, self).__init__() def forward(self, x): return hard_sigmoid_jit(x) @torch.jit.script def hard_swish_jit(x, inplace: bool = False): # return x * (F.relu6(x + 3.) / 6) return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSwishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishJit, self).__init__() def forward(self, x): return hard_swish_jit(x) @torch.jit.script def hard_mish_jit(x, inplace: bool = False): """ Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardMishJit, self).__init__() def forward(self, x): return hard_mish_jit(x)
hf_public_repos/pytorch-image-models/timm/layers/mlp.py
""" MLP module w/ dropout and configurable activation layer Hacked together by / Copyright 2020 Ross Wightman """ from functools import partial from torch import nn as nn from .grn import GlobalResponseNorm from .helpers import to_2tuple class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GluMlp(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, norm_layer=None, bias=True, drop=0., use_conv=False, gate_last=True, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.chunk_dim = 1 if use_conv else -1 self.gate_last = gate_last # use second half of width for gate self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) def forward(self, x): x = self.fc1(x) x1, x2 = x.chunk(2, dim=self.chunk_dim) x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2 x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False) class SwiGLU(nn.Module): """ SwiGLU NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and better matches some other common impl which makes mapping checkpoints simpler. 
""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0]) self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 nn.init.ones_(self.fc1_g.bias) nn.init.normal_(self.fc1_g.weight, std=1e-6) def forward(self, x): x_gate = self.fc1_g(x) x = self.fc1_x(x) x = self.act(x_gate) * x x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GatedMlp(nn.Module): """ MLP as used in gMLP """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, gate_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if gate_layer is not None: assert hidden_features % 2 == 0 self.gate = gate_layer(hidden_features) hidden_features = hidden_features // 2 # FIXME base reduction on gate property? else: self.gate = nn.Identity() self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.gate(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class ConvMlp(nn.Module): """ MLP using 1x1 convs that keeps spatial dims """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() self.act = act_layer() self.drop = nn.Dropout(drop) self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) def forward(self, x): x = self.fc1(x) x = self.norm(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) return x class GlobalResponseNormMlp(nn.Module): """ MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv) self.fc2 = linear_layer(hidden_features, 
out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.grn(x) x = self.fc2(x) x = self.drop2(x) return x
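A minimal usage sketch (not part of the original file; imports assume an installed timm exporting Mlp and SwiGLU via timm.layers). Both variants preserve the token shape, differing only in hidden structure:

import torch
from timm.layers import Mlp, SwiGLU

tokens = torch.randn(2, 196, 384)  # B, N, C as inside a ViT block

mlp = Mlp(384, hidden_features=4 * 384, drop=0.1)  # standard 4x expansion
print(mlp(tokens).shape)  # torch.Size([2, 196, 384])

swiglu = SwiGLU(384, hidden_features=4 * 384)  # split gate / value projections
print(swiglu(tokens).shape)  # torch.Size([2, 196, 384])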
hf_public_repos/pytorch-image-models/timm/layers/create_conv2d.py
""" Create Conv2d Factory Method Hacked together by / Copyright 2020 Ross Wightman """ from .mixed_conv2d import MixedConv2d from .cond_conv2d import CondConv2d from .conv2d_same import create_conv2d_pad def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): """ Select a 2d convolution implementation based on arguments Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. Used extensively by EfficientNet, MobileNetv3 and related networks. """ if isinstance(kernel_size, list): assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently if 'groups' in kwargs: groups = kwargs.pop('groups') if groups == in_channels: kwargs['depthwise'] = True else: assert groups == 1 # We're going to use only lists for defining the MixedConv2d kernel groups, # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) else: depthwise = kwargs.pop('depthwise', False) # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 groups = in_channels if depthwise else kwargs.pop('groups', 1) if 'num_experts' in kwargs and kwargs['num_experts'] > 0: m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) else: m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) return m
hf_public_repos/pytorch-image-models/timm/layers/pos_embed.py
""" Position Embedding Utilities Hacked together by / Copyright 2022 Ross Wightman """ import logging import math from typing import List, Tuple, Optional, Union import torch import torch.nn.functional as F from .helpers import to_2tuple _logger = logging.getLogger(__name__) def resample_abs_pos_embed( posemb, new_size: List[int], old_size: Optional[List[int]] = None, num_prefix_tokens: int = 1, interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): # sort out sizes, assume square if old size not provided num_pos_tokens = posemb.shape[1] num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]: return posemb if old_size is None: hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens)) old_size = hw, hw if num_prefix_tokens: posemb_prefix, posemb = posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:] else: posemb_prefix, posemb = None, posemb # do the interpolation embed_dim = posemb.shape[-1] orig_dtype = posemb.dtype posemb = posemb.float() # interpolate needs float32 posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim) posemb = posemb.to(orig_dtype) # add back extra (class, etc) prefix tokens if posemb_prefix is not None: posemb = torch.cat([posemb_prefix, posemb], dim=1) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {old_size} to {new_size}.') return posemb def resample_abs_pos_embed_nhwc( posemb, new_size: List[int], interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]: return posemb orig_dtype = posemb.dtype posemb = posemb.float() # do the interpolation posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).to(orig_dtype) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.') return posemb
hf_public_repos/pytorch-image-models/timm/layers/create_act.py
""" Activation Factory Hacked together by / Copyright 2020 Ross Wightman """ from typing import Union, Callable, Type from .activations import * from .activations_jit import * from .activations_me import * from .config import is_exportable, is_scriptable, is_no_jit # PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. # Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. # Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. _has_silu = 'silu' in dir(torch.nn.functional) _has_hardswish = 'hardswish' in dir(torch.nn.functional) _has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) _has_mish = 'mish' in dir(torch.nn.functional) _ACT_FN_DEFAULT = dict( silu=F.silu if _has_silu else swish, swish=F.silu if _has_silu else swish, mish=F.mish if _has_mish else mish, relu=F.relu, relu6=F.relu6, leaky_relu=F.leaky_relu, elu=F.elu, celu=F.celu, selu=F.selu, gelu=gelu, gelu_tanh=gelu_tanh, quick_gelu=quick_gelu, sigmoid=sigmoid, tanh=tanh, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, hard_swish=F.hardswish if _has_hardswish else hard_swish, hard_mish=hard_mish, ) _ACT_FN_JIT = dict( silu=F.silu if _has_silu else swish_jit, swish=F.silu if _has_silu else swish_jit, mish=F.mish if _has_mish else mish_jit, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, hard_mish=hard_mish_jit, ) _ACT_FN_ME = dict( silu=F.silu if _has_silu else swish_me, swish=F.silu if _has_silu else swish_me, mish=F.mish if _has_mish else mish_me, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, hard_swish=F.hardswish if _has_hardswish else hard_swish_me, hard_mish=hard_mish_me, ) _ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) for a in _ACT_FNS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) _ACT_LAYER_DEFAULT = dict( silu=nn.SiLU if _has_silu else Swish, swish=nn.SiLU if _has_silu else Swish, mish=nn.Mish if _has_mish else Mish, relu=nn.ReLU, relu6=nn.ReLU6, leaky_relu=nn.LeakyReLU, elu=nn.ELU, prelu=PReLU, celu=nn.CELU, selu=nn.SELU, gelu=GELU, gelu_tanh=GELUTanh, quick_gelu=QuickGELU, sigmoid=Sigmoid, tanh=Tanh, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, hard_swish=nn.Hardswish if _has_hardswish else HardSwish, hard_mish=HardMish, identity=nn.Identity, ) _ACT_LAYER_JIT = dict( silu=nn.SiLU if _has_silu else SwishJit, swish=nn.SiLU if _has_silu else SwishJit, mish=nn.Mish if _has_mish else MishJit, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, hard_mish=HardMishJit, ) _ACT_LAYER_ME = dict( silu=nn.SiLU if _has_silu else SwishMe, swish=nn.SiLU if _has_silu else SwishMe, mish=nn.Mish if _has_mish else MishMe, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, hard_mish=HardMishMe, ) _ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) for a in _ACT_LAYERS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) def get_act_fn(name: Union[Callable, str] = 'relu'): """ Activation Function Factory Fetching activation fns by name with this function allows export or torch script friendly functions to be returned dynamically based on current config. 
""" if not name: return None if isinstance(name, Callable): return name if not (is_no_jit() or is_exportable() or is_scriptable()): # If not exporting or scripting the model, first look for a memory-efficient version with # custom autograd, then fallback if name in _ACT_FN_ME: return _ACT_FN_ME[name] if not (is_no_jit() or is_exportable()): if name in _ACT_FN_JIT: return _ACT_FN_JIT[name] return _ACT_FN_DEFAULT[name] def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): """ Activation Layer Factory Fetching activation layers by name with this function allows export or torch script friendly functions to be returned dynamically based on current config. """ if name is None: return None if not isinstance(name, str): # callable, module, etc return name if not name: return None if not (is_no_jit() or is_exportable() or is_scriptable()): if name in _ACT_LAYER_ME: return _ACT_LAYER_ME[name] if not (is_no_jit() or is_exportable()): if name in _ACT_LAYER_JIT: return _ACT_LAYER_JIT[name] return _ACT_LAYER_DEFAULT[name] def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): act_layer = get_act_layer(name) if act_layer is None: return None if inplace is None: return act_layer(**kwargs) try: return act_layer(inplace=inplace, **kwargs) except TypeError: # recover if act layer doesn't have inplace arg return act_layer(**kwargs)
hf_public_repos/pytorch-image-models/timm/layers/halo_attn.py
""" Halo Self Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 @misc{2103.12731, Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and Jonathon Shlens}, Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, Year = {2021}, } Status: This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. The attention mechanism works but it's slow as implemented. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch from torch import nn import torch.nn.functional as F from .helpers import make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, height, width, dim) rel_k: (2 * window - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape rel_size = rel_k.shape[0] win_size = (rel_size + 1) // 2 x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, rel_size) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, rel_size - W]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, rel_size) x = x_pad[:, :W, win_size - 1:] # reshape and tile x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, block_size, win_size, dim_head, scale): """ Args: block_size (int): block size win_size (int): neighbourhood window size dim_head (int): attention head dim scale (float): scale factor (for init) """ super().__init__() self.block_size = block_size self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) def forward(self, q): B, BB, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, BB, HW, -1) return rel_logits class HaloAttn(nn.Module): """ Halo Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
      * the query and key (qk) dimensions are determined by
        * num_heads * dim_head if dim_head is not None
        * num_heads * (dim_out * qk_ratio // num_heads) if dim_head is None
      * as seen above, qk_ratio determines the ratio of q and k relative to the output if dim_head not used

    Args:
        dim (int): input dimension to the module
        dim_out (int): output dimension of the module, same as dim if not set
        feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
        stride: output stride of the module, query downscaled if > 1 (default: 1).
        num_heads: parallel attention heads (default: 8).
        dim_head: dimension of query and key heads, calculated from dim_out * qk_ratio // num_heads if not set
        block_size (int): size of blocks. (default: 8)
        halo_size (int): size of halo overlap. (default: 3)
        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
        qkv_bias (bool) : add bias to q, k, and v projections
        avg_down (bool): use average pool downsample instead of strided query blocks
        scale_pos_embed (bool): scale the position embedding as well as Q @ K
    """
    def __init__(
            self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3,
            qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False):
        super().__init__()
        dim_out = dim_out or dim
        assert dim_out % num_heads == 0
        assert stride in (1, 2)
        self.num_heads = num_heads
        self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
        self.dim_head_v = dim_out // self.num_heads
        self.dim_out_qk = num_heads * self.dim_head_qk
        self.dim_out_v = num_heads * self.dim_head_v
        self.scale = self.dim_head_qk ** -0.5
        self.scale_pos_embed = scale_pos_embed
        self.block_size = self.block_size_ds = block_size
        self.halo_size = halo_size
        self.win_size = block_size + halo_size * 2  # neighbourhood window size
        self.block_stride = 1
        use_avg_pool = False
        if stride > 1:
            use_avg_pool = avg_down or block_size % stride != 0
            self.block_stride = 1 if use_avg_pool else stride
            self.block_size_ds = self.block_size // self.block_stride

        # FIXME not clear if this stride behaviour is what the paper intended
        # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
        # data in unfolded block form. I haven't wrapped my head around how that'd look.
self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) self.pos_embed = PosEmbedRel( block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() self.reset_parameters() def reset_parameters(self): std = self.q.weight.shape[1] ** -0.5 # fan-in trunc_normal_(self.q.weight, std=std) trunc_normal_(self.kv.weight, std=std) trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H % self.block_size == 0, '') _assert(W % self.block_size == 0, '') num_h_blocks = H // self.block_size num_w_blocks = W // self.block_size num_blocks = num_h_blocks * num_w_blocks q = self.q(x) # unfold q = q.reshape( -1, self.dim_head_qk, num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) # B, num_heads * dim_head * block_size ** 2, num_blocks q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) # B * num_heads, num_blocks, block_size ** 2, dim_head kv = self.kv(x) # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. # FIXME figure out how to switch impl between this and conv2d if XLA being used. kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v if self.scale_pos_embed: attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale else: attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 attn = attn.softmax(dim=-1) out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks # fold out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) out = out.permute(0, 3, 1, 4, 2).contiguous().view( B, self.dim_out_v, H // self.block_stride, W // self.block_stride) # B, dim_out, H // block_stride, W // block_stride out = self.pool(out) return out """ Three alternatives for overlapping windows. `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() if is_xla: # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. 
WW = self.win_size ** 2 pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) elif self.stride_tricks: kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() kv = kv.as_strided(( B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) else: kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) kv = kv.reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) """
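# ---------------------------------------------------------------------------
# Illustrative smoke test (an addition to this document, not original timm
# source). The dim / head / block values are arbitrary assumptions for the
# demo; H and W must be divisible by block_size for the blocking logic to hold.
if __name__ == '__main__':
    attn = HaloAttn(dim=128, dim_out=128, num_heads=4, block_size=8, halo_size=2)
    x = torch.randn(2, 128, 32, 32)
    print(attn(x).shape)  # expected: torch.Size([2, 128, 32, 32])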
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/adaptive_avgmax_pool.py
""" PyTorch selectable adaptive pooling Adaptive pooling with the ability to select the type of pooling from: * 'avg' - Average pooling * 'max' - Max pooling * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim Both a functional and a nn.Module version of the pooling is provided. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from .format import get_spatial_dim, get_channel_dim _int_tuple_2_t = Union[int, Tuple[int, int]] def adaptive_pool_feat_mult(pool_type='avg'): if pool_type.endswith('catavgmax'): return 2 else: return 1 def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return 0.5 * (x_avg + x_max) def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return torch.cat((x_avg, x_max), 1) def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1): """Selectable global pooling function with dynamic input kernel size """ if pool_type == 'avg': x = F.adaptive_avg_pool2d(x, output_size) elif pool_type == 'avgmax': x = adaptive_avgmax_pool2d(x, output_size) elif pool_type == 'catavgmax': x = adaptive_catavgmax_pool2d(x, output_size) elif pool_type == 'max': x = F.adaptive_max_pool2d(x, output_size) else: assert False, 'Invalid pool type: %s' % pool_type return x class FastAdaptiveAvgPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: F = 'NCHW'): super(FastAdaptiveAvgPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.mean(self.dim, keepdim=not self.flatten) class FastAdaptiveMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.amax(self.dim, keepdim=not self.flatten) class FastAdaptiveAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveAvgMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim, keepdim=not self.flatten) x_max = x.amax(self.dim, keepdim=not self.flatten) return 0.5 * x_avg + 0.5 * x_max class FastAdaptiveCatAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveCatAvgMaxPool, self).__init__() self.flatten = flatten self.dim_reduce = get_spatial_dim(input_fmt) if flatten: self.dim_cat = 1 else: self.dim_cat = get_channel_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten) x_max = x.amax(self.dim_reduce, keepdim=not self.flatten) return torch.cat((x_avg, x_max), self.dim_cat) class AdaptiveAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_avgmax_pool2d(x, self.output_size) class AdaptiveCatAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveCatAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_catavgmax_pool2d(x, self.output_size) 
class SelectAdaptivePool2d(nn.Module): """Selectable global pooling layer with dynamic input kernel size """ def __init__( self, output_size: _int_tuple_2_t = 1, pool_type: str = 'fast', flatten: bool = False, input_fmt: str = 'NCHW', ): super(SelectAdaptivePool2d, self).__init__() assert input_fmt in ('NCHW', 'NHWC') self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing if not pool_type: self.pool = nn.Identity() # pass through self.flatten = nn.Flatten(1) if flatten else nn.Identity() elif pool_type.startswith('fast') or input_fmt != 'NCHW': assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.' if pool_type.endswith('catavgmax'): self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('avgmax'): self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('max'): self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt) else: self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt) self.flatten = nn.Identity() else: assert input_fmt == 'NCHW' if pool_type == 'avgmax': self.pool = AdaptiveAvgMaxPool2d(output_size) elif pool_type == 'catavgmax': self.pool = AdaptiveCatAvgMaxPool2d(output_size) elif pool_type == 'max': self.pool = nn.AdaptiveMaxPool2d(output_size) else: self.pool = nn.AdaptiveAvgPool2d(output_size) self.flatten = nn.Flatten(1) if flatten else nn.Identity() def is_identity(self): return not self.pool_type def forward(self, x): x = self.pool(x) x = self.flatten(x) return x def feat_mult(self): return adaptive_pool_feat_mult(self.pool_type) def __repr__(self): return self.__class__.__name__ + '(' \ + 'pool_type=' + self.pool_type \ + ', flatten=' + str(self.flatten) + ')'
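# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition to this document, not original timm
# source). Shows the selectable pooling interface; shapes are arbitrary demo values.
if __name__ == '__main__':
    pool = SelectAdaptivePool2d(pool_type='avgmax', flatten=True)
    x = torch.randn(2, 512, 7, 7)
    print(pool(x).shape)     # torch.Size([2, 512])
    print(pool.feat_mult())  # 1; 'catavgmax' would report 2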
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/classifier.py
""" Classifier head and layer factory Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from functools import partial from typing import Optional, Union, Callable import torch import torch.nn as nn from torch.nn import functional as F from .adaptive_avgmax_pool import SelectAdaptivePool2d from .create_act import get_act_layer from .create_norm import get_norm_layer def _create_pool( num_features: int, num_classes: int, pool_type: str = 'avg', use_conv: bool = False, input_fmt: Optional[str] = None, ): flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling if not pool_type: assert num_classes == 0 or use_conv,\ 'Pooling can only be disabled if classifier is also removed or conv classifier is used' flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) global_pool = SelectAdaptivePool2d( pool_type=pool_type, flatten=flatten_in_pool, input_fmt=input_fmt, ) num_pooled_features = num_features * global_pool.feat_mult() return global_pool, num_pooled_features def _create_fc(num_features, num_classes, use_conv=False): if num_classes <= 0: fc = nn.Identity() # pass-through (no classifier) elif use_conv: fc = nn.Conv2d(num_features, num_classes, 1, bias=True) else: fc = nn.Linear(num_features, num_classes, bias=True) return fc def create_classifier( num_features: int, num_classes: int, pool_type: str = 'avg', use_conv: bool = False, input_fmt: str = 'NCHW', drop_rate: Optional[float] = None, ): global_pool, num_pooled_features = _create_pool( num_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt, ) fc = _create_fc( num_pooled_features, num_classes, use_conv=use_conv, ) if drop_rate is not None: dropout = nn.Dropout(drop_rate) return global_pool, dropout, fc return global_pool, fc class ClassifierHead(nn.Module): """Classifier head w/ configurable global pooling and dropout.""" def __init__( self, in_features: int, num_classes: int, pool_type: str = 'avg', drop_rate: float = 0., use_conv: bool = False, input_fmt: str = 'NCHW', ): """ Args: in_features: The number of input features. num_classes: The number of classes for the final classifier layer (output). pool_type: Global pooling type, pooling disabled if empty string (''). drop_rate: Pre-classifier dropout rate. 
""" super(ClassifierHead, self).__init__() self.in_features = in_features self.use_conv = use_conv self.input_fmt = input_fmt global_pool, fc = create_classifier( in_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt, ) self.global_pool = global_pool self.drop = nn.Dropout(drop_rate) self.fc = fc self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() def reset(self, num_classes, pool_type=None): if pool_type is not None and pool_type != self.global_pool.pool_type: self.global_pool, self.fc = create_classifier( self.in_features, num_classes, pool_type=pool_type, use_conv=self.use_conv, input_fmt=self.input_fmt, ) self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity() else: num_pooled_features = self.in_features * self.global_pool.feat_mult() self.fc = _create_fc( num_pooled_features, num_classes, use_conv=self.use_conv, ) def forward(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) class NormMlpClassifierHead(nn.Module): def __init__( self, in_features: int, num_classes: int, hidden_size: Optional[int] = None, pool_type: str = 'avg', drop_rate: float = 0., norm_layer: Union[str, Callable] = 'layernorm2d', act_layer: Union[str, Callable] = 'tanh', ): """ Args: in_features: The number of input features. num_classes: The number of classes for the final classifier layer (output). hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None. pool_type: Global pooling type, pooling disabled if empty string (''). drop_rate: Pre-classifier dropout rate. norm_layer: Normalization layer type. act_layer: MLP activation layer type (only used if hidden_size is not None). """ super().__init__() self.in_features = in_features self.hidden_size = hidden_size self.num_features = in_features self.use_conv = not pool_type norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) self.norm = norm_layer(in_features) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() if hidden_size: self.pre_logits = nn.Sequential(OrderedDict([ ('fc', linear_layer(in_features, hidden_size)), ('act', act_layer()), ])) self.num_features = hidden_size else: self.pre_logits = nn.Identity() self.drop = nn.Dropout(drop_rate) self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def reset(self, num_classes, global_pool=None): if global_pool is not None: self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.use_conv = self.global_pool.is_identity() linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear if self.hidden_size: if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)): with torch.no_grad(): new_fc = linear_layer(self.in_features, self.hidden_size) new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape)) new_fc.bias.copy_(self.pre_logits.fc.bias) self.pre_logits.fc = new_fc self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.norm(x) x = self.flatten(x) x = self.pre_logits(x) x = self.drop(x) if pre_logits: return x x = 
self.fc(x) return x
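# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition to this document, not original timm
# source). Feature and class counts are arbitrary demo values.
if __name__ == '__main__':
    head = ClassifierHead(in_features=2048, num_classes=10, pool_type='avg', drop_rate=0.1)
    x = torch.randn(2, 2048, 7, 7)
    print(head(x).shape)                   # torch.Size([2, 10])
    print(head(x, pre_logits=True).shape)  # torch.Size([2, 2048])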
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/__init__.py
from .activations import *
from .adaptive_avgmax_pool import \
    adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .attention_pool import AttentionPoolLatent
from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier, NormMlpClassifierHead
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \
    set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm import get_norm_layer, create_norm_layer
from .create_norm_act import get_norm_act_layer, create_norm_act_layer
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
    EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
from .inplace_abn import InplaceAbn
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm
from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\
    SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d
from .padding import get_padding, get_same_padding, pad_same
from .patch_dropout import PatchDropout
from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, \
    resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit
from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \
    build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \
    FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvNormAct
from .space_to_depth import SpaceToDepthModule, SpaceToDepth, DepthToSpace
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .trace_utils import _assert, _float_to_int from .typing import LayerType, PadType from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/interpolate.py
""" Interpolation helpers for timm layers RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations Copyright Shane Barratt, Apache 2.0 license """ import torch from itertools import product class RegularGridInterpolator: """ Interpolate data defined on a rectilinear grid with even or uneven spacing. Produces similar results to scipy RegularGridInterpolator or interp2d in 'linear' mode. Taken from https://github.com/sbarratt/torch_interpolations """ def __init__(self, points, values): self.points = points self.values = values assert isinstance(self.points, tuple) or isinstance(self.points, list) assert isinstance(self.values, torch.Tensor) self.ms = list(self.values.shape) self.n = len(self.points) assert len(self.ms) == self.n for i, p in enumerate(self.points): assert isinstance(p, torch.Tensor) assert p.shape[0] == self.values.shape[i] def __call__(self, points_to_interp): assert self.points is not None assert self.values is not None assert len(points_to_interp) == len(self.points) K = points_to_interp[0].shape[0] for x in points_to_interp: assert x.shape[0] == K idxs = [] dists = [] overalls = [] for p, x in zip(self.points, points_to_interp): idx_right = torch.bucketize(x, p) idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1 idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1) dist_left = x - p[idx_left] dist_right = p[idx_right] - x dist_left[dist_left < 0] = 0. dist_right[dist_right < 0] = 0. both_zero = (dist_left == 0) & (dist_right == 0) dist_left[both_zero] = dist_right[both_zero] = 1. idxs.append((idx_left, idx_right)) dists.append((dist_left, dist_right)) overalls.append(dist_left + dist_right) numerator = 0. for indexer in product([0, 1], repeat=self.n): as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)] bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)] numerator += self.values[as_s] * \ torch.prod(torch.stack(bs_s), dim=0) denominator = torch.prod(torch.stack(overalls), dim=0) return numerator / denominator
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/ml_decoder.py
from typing import Optional

import torch
from torch import nn, Tensor
from torch.nn.modules.transformer import _get_activation_fn


def add_ml_decoder_head(model):
    if hasattr(model, 'global_pool') and hasattr(model, 'fc'):  # most CNN models, like Resnet50
        model.global_pool = nn.Identity()
        del model.fc
        num_classes = model.num_classes
        num_features = model.num_features
        model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
    elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'):  # EfficientNet
        model.global_pool = nn.Identity()
        del model.classifier
        num_classes = model.num_classes
        num_features = model.num_features
        model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
    elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name():  # hasattr(model, 'head')
        del model.head
        num_classes = model.num_classes
        num_features = model.num_features
        model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
    else:
        print("Model is not currently supported by add_ml_decoder_head")
        exit(-1)
    if hasattr(model, 'drop_rate'):  # Ml-Decoder has inner dropout
        model.drop_rate = 0
    return model


class TransformerDecoderLayerOptimal(nn.Module):
    def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu",
                 layer_norm_eps=1e-5) -> None:
        super(TransformerDecoderLayerOptimal, self).__init__()
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)

        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = torch.nn.functional.relu
        super(TransformerDecoderLayerOptimal, self).__setstate__(state)

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        tgt = tgt + self.dropout1(tgt)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(tgt, memory, memory)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt


# @torch.jit.script
# class ExtrapClasses(object):
#     def __init__(self, num_queries: int, group_size: int):
#         self.num_queries = num_queries
#         self.group_size = group_size
#
#     def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap:
#     torch.Tensor):
#         # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size)
#         h = h[..., None].repeat(1, 1, 1, self.group_size)  # torch.Size([bs, 5, 768, groups])
#         w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size))
#         out = (h * w).sum(dim=2) + class_embed_b
#         out = out.view((h.shape[0], self.group_size * self.num_queries))
#         return out


@torch.jit.script
class GroupFC(object):
    def __init__(self, embed_len_decoder: int):
        self.embed_len_decoder = embed_len_decoder

    def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap:
torch.Tensor): for i in range(self.embed_len_decoder): h_i = h[:, i, :] w_i = duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if embed_len_decoder > num_classes: embed_len_decoder = num_classes # switching to 768 initial embeddings decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) # decoder decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) # non-learnable queries self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) # group fully-connected self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter( torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) self.group_fc = GroupFC(embed_len_decoder) def forward(self, x): if len(x.shape) == 4: # [bs,2048, 7,7] embedding_spatial = x.flatten(2).transpose(1, 2) else: # [bs, 197,468] embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) self.group_fc(h, self.duplicate_pooling, out_extrap) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits
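# ---------------------------------------------------------------------------
# Illustrative smoke test (an addition to this document, not original timm
# source). Assumes a torch version whose nn.TransformerDecoder is compatible
# with the decoder layer above, as this module requires in general; the class
# and feature counts are arbitrary demo values.
if __name__ == '__main__':
    decoder = MLDecoder(num_classes=80, initial_num_features=2048)
    feats = torch.randn(2, 2048, 7, 7)  # CNN feature map input
    print(decoder(feats).shape)  # torch.Size([2, 80])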
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/non_local_attn.py
""" Bilinear-Attention-Transform and Non-Local Attention Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification """ import torch from torch import nn from torch.nn import functional as F from .conv_bn_act import ConvNormAct from .helpers import make_divisible from .trace_utils import _assert class NonLocalAttn(nn.Module): """Spatial NL block for image classification. This was adapted from https://github.com/BA-Transform/BAT-Image-Classification Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. """ def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): super(NonLocalAttn, self).__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.scale = in_channels ** -0.5 if use_scale else 1.0 self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) self.norm = nn.BatchNorm2d(in_channels) self.reset_parameters() def forward(self, x): shortcut = x t = self.t(x) p = self.p(x) g = self.g(x) B, C, H, W = t.size() t = t.view(B, C, -1).permute(0, 2, 1) p = p.view(B, C, -1) g = g.view(B, C, -1).permute(0, 2, 1) att = torch.bmm(t, p) * self.scale att = F.softmax(att, dim=2) x = torch.bmm(att, g) x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.z(x) x = self.norm(x) + shortcut return x def reset_parameters(self): for name, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') if len(list(m.parameters())) > 1: nn.init.constant_(m.bias, 0.0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.GroupNorm): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) class BilinearAttnTransform(nn.Module): def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): super(BilinearAttnTransform, self).__init__() self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.block_size = block_size self.groups = groups self.in_channels = in_channels def resize_mat(self, x, t: int): B, C, block_size, block_size1 = x.shape _assert(block_size == block_size1, '') if t <= 1: return x x = x.view(B * C, -1, 1, 1) x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) x = x.view(B * C, block_size, block_size, t, t) x = torch.cat(torch.split(x, 1, dim=1), dim=3) x = torch.cat(torch.split(x, 1, dim=2), dim=4) x = x.view(B, C, block_size * t, block_size * t) return x def forward(self, x): _assert(x.shape[-1] % self.block_size == 0, '') _assert(x.shape[-2] % self.block_size == 0, '') B, C, H, W = x.shape out = self.conv1(x) rp = F.adaptive_max_pool2d(out, (self.block_size, 
1)) cp = F.adaptive_max_pool2d(out, (1, self.block_size)) p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() p = p / p.sum(dim=3, keepdim=True) q = q / q.sum(dim=2, keepdim=True) p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() p = p.view(B, C, self.block_size, self.block_size) q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() q = q.view(B, C, self.block_size, self.block_size) p = self.resize_mat(p, H // self.block_size) q = self.resize_mat(q, W // self.block_size) y = p.matmul(x) y = y.matmul(q) y = self.conv2(y) return y class BatNonLocalAttn(nn.Module): """ BAT Adapted from: https://github.com/BA-Transform/BAT-Image-Classification """ def __init__( self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): super().__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.dropout = nn.Dropout2d(p=drop_rate) def forward(self, x): xl = self.conv1(x) y = self.ba(xl) y = self.conv2(y) y = self.dropout(y) return y + x
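# ---------------------------------------------------------------------------
# Illustrative smoke test (an addition to this document, not original timm
# source). Channel and spatial sizes are demo values; for BatNonLocalAttn,
# H and W must be divisible by block_size.
if __name__ == '__main__':
    x = torch.randn(2, 64, 14, 14)
    nl = NonLocalAttn(in_channels=64)
    print(nl(x).shape)   # torch.Size([2, 64, 14, 14])
    bat = BatNonLocalAttn(in_channels=64, block_size=7)
    print(bat(x).shape)  # torch.Size([2, 64, 14, 14])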
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/evo_norm.py
""" EvoNorm in PyTorch Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 @inproceedings{NEURIPS2020, author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, booktitle = {Advances in Neural Information Processing Systems}, editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, pages = {13539--13550}, publisher = {Curran Associates, Inc.}, title = {Evolving Normalization-Activation Layers}, url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, volume = {33}, year = {2020} } An attempt at getting decent performing EvoNorms running in PyTorch. While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm in terms of memory usage and throughput on GPUs. I'm testing these modules on TPU w/ PyTorch XLA. Promising start but currently working around some issues with builtin torch/tensor.var/std. Unlike GPU, similar train speeds for EvoNormS variants and BatchNorm. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Sequence, Union import torch import torch.nn as nn import torch.nn.functional as F from .create_act import create_act_layer from .trace_utils import _assert def instance_std(x, eps: float = 1e-5): std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) return std.expand(x.shape) def instance_std_tpu(x, eps: float = 1e-5): std = manual_var(x, dim=(2, 3)).add(eps).sqrt() return std.expand(x.shape) # instance_std = instance_std_tpu def instance_rms(x, eps: float = 1e-5): rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) return rms.expand(x.shape) def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): xm = x.mean(dim=dim, keepdim=True) if diff_sqm: # difference of squared mean and mean squared, faster on TPU can be less stable var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) else: var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) return var def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): B, C, H, W = x.shape x_dtype = x.dtype _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) else: x = x.reshape(B, groups, C // groups, H, W) std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) return std.expand(x.shape).reshape(B, C, H, W) def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): # This is a workaround for some stability / odd behaviour of .var and .std # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results B, C, H, W = x.shape _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues var = manual_var(x, dim=-1, diff_sqm=diff_sqm) else: x = x.reshape(B, groups, C // groups, H, W) var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) #group_std = group_std_tpu # FIXME TPU temporary def group_rms(x, groups: int = 32, eps: float = 1e-5): B, C, H, W = x.shape _assert(C % groups == 0, '') x_dtype = x.dtype x = x.reshape(B, groups, C // groups, H, W) rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) return rms.expand(x.shape).reshape(B, C, H, W) class EvoNorm2dB0(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) # var = manual_var(x, dim=(0, 2, 3)).squeeze() n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach() * self.momentum * (n / (n - 1))) else: var = self.running_var left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) v = self.v.to(x_dtype).view(v_shape) right = x * v + instance_std(x, self.eps) x = x / left.max(right) return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) class EvoNorm2dB1(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = (x + 1) * instance_rms(x, self.eps) x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dB2(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', 
torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = instance_rms(x, self.eps) - x x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0a(EvoNorm2dS0): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) d = group_std(x, self.groups, self.eps) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() x = x / d return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.pre_act_norm = False self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1a(EvoNorm2dS1): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def 
forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2a(EvoNorm2dS2): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
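# ---------------------------------------------------------------------------
# Illustrative smoke test (an addition to this document, not original timm
# source). Feature / group counts are demo values; num_features must be
# divisible by groups for the grouped statistics above.
if __name__ == '__main__':
    norm = EvoNorm2dS0(num_features=64, groups=8)
    x = torch.randn(2, 64, 8, 8)
    print(norm(x).shape)  # torch.Size([2, 64, 8, 8])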
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/conv2d_same.py
""" Conv2d w/ Same Padding Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F from typing import Tuple, Optional from .config import is_exportable, is_scriptable from .padding import pad_same, pad_same_arg, get_padding_value _USE_EXPORT_CONV = False def conv2d_same( x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1, ): x = pad_same(x, weight.shape[-2:], stride, dilation) return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) class Conv2dSame(nn.Conv2d): """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSame, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) def forward(self, x): return conv2d_same( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) class Conv2dSameExport(nn.Conv2d): """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions NOTE: This does not currently work with torch.jit.script """ # pylint: disable=unused-argument def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSameExport, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) self.pad = None self.pad_input_size = (0, 0) def forward(self, x): input_size = x.size()[-2:] if self.pad is None: pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) self.pad = nn.ZeroPad2d(pad_arg) self.pad_input_size = input_size x = self.pad(x) return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop('padding', '') kwargs.setdefault('bias', False) padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) if is_dynamic: if _USE_EXPORT_CONV and is_exportable(): # older PyTorch ver needed this to export same padding reasonably assert not is_scriptable() # Conv2DSameExport does not work with jit return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) else: return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) else: return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/patch_dropout.py
from typing import Optional, Tuple, Union import torch import torch.nn as nn class PatchDropout(nn.Module): """ https://arxiv.org/abs/2212.00794 """ return_indices: torch.jit.Final[bool] def __init__( self, prob: float = 0.5, num_prefix_tokens: int = 1, ordered: bool = False, return_indices: bool = False, ): super().__init__() assert 0 <= prob < 1. self.prob = prob self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens) self.ordered = ordered self.return_indices = return_indices def forward(self, x) -> Union[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]: if not self.training or self.prob == 0.: if self.return_indices: return x, None return x if self.num_prefix_tokens: prefix_tokens, x = x[:, :self.num_prefix_tokens], x[:, self.num_prefix_tokens:] else: prefix_tokens = None B = x.shape[0] L = x.shape[1] num_keep = max(1, int(L * (1. - self.prob))) keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep] if self.ordered: # NOTE does not need to maintain patch order in typical transformer use, # but possibly useful for debug / visualization keep_indices = keep_indices.sort(dim=-1)[0] x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:])) if prefix_tokens is not None: x = torch.cat((prefix_tokens, x), dim=1) if self.return_indices: return x, keep_indices return x
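# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition to this document, not original timm
# source). Token shapes follow a ViT-style sequence (1 CLS token + 196 patches);
# dropout only applies in training mode.
if __name__ == '__main__':
    drop = PatchDropout(prob=0.5, num_prefix_tokens=1)
    drop.train()
    x = torch.randn(2, 197, 768)
    print(drop(x).shape)  # torch.Size([2, 99, 768]) -> CLS + 98 kept patches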
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/gather_excite.py
""" Gather-Excite Attention Block Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another impl that covers all of the cases. NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation Hacked together by / Copyright 2021 Ross Wightman """ import math from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .create_conv2d import create_conv2d from .helpers import make_divisible from .mlp import ConvMlp class GatherExcite(nn.Module): """ Gather-Excite Attention Module """ def __init__( self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): super(GatherExcite, self).__init__() self.add_maxpool = add_maxpool act_layer = get_act_layer(act_layer) self.extent = extent if extra_params: self.gather = nn.Sequential() if extent == 0: assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' self.gather.add_module( 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) if norm_layer: self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) else: assert extent % 2 == 0 num_conv = int(math.log2(extent)) for i in range(num_conv): self.gather.add_module( f'conv{i + 1}', create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) if norm_layer: self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) if i != num_conv - 1: self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) else: self.gather = None if self.extent == 0: self.gk = 0 self.gs = 0 else: assert extent % 2 == 0 self.gk = self.extent * 2 - 1 self.gs = self.extent if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() self.gate = create_act_layer(gate_layer) def forward(self, x): size = x.shape[-2:] if self.gather is not None: x_ge = self.gather(x) else: if self.extent == 0: # global extent x_ge = x.mean(dim=(2, 3), keepdims=True) if self.add_maxpool: # experimental codepath, may remove or change x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) else: x_ge = F.avg_pool2d( x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) if self.add_maxpool: # experimental codepath, may remove or change x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) x_ge = self.mlp(x_ge) if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: x_ge = F.interpolate(x_ge, size=size) return x * self.gate(x_ge)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/linear.py
""" Linear layer (alternate definition) """ import torch import torch.nn.functional as F from torch import nn as nn class Linear(nn.Linear): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. """ def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/norm.py
""" Normalization layers and wrappers Norm layer definitions that support fast norm and consistent channel arg order (always first arg). Hacked together by / Copyright 2022 Ross Wightman """ import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm class GroupNorm(nn.GroupNorm): def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN super().__init__(num_groups, num_channels, eps=eps, affine=affine) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x): if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, *] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): """ LayerNorm w/ fast norm option """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNorm2d(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: # jit is oh so lovely :/ if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) @torch.jit.script def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). Experimental implementation w/ manual norm for tensors non-contiguous tensors. 
This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last layout. However, benefits are not always clear and can perform worse on other GPUs. """ def __init__(self, num_channels, eps=1e-6): super().__init__(num_channels, eps=eps) def forward(self, x) -> torch.Tensor: if _is_contiguous(x): x = F.layer_norm( x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) else: x = _layer_norm_cf(x, self.weight, self.bias, self.eps) return x class RmsNorm(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed. x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) return x
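# Minimal usage sketch for the wrappers above; shapes below are illustrative assumptions only.
if __name__ == '__main__':
    x = torch.randn(2, 64, 7, 7)  # NCHW feature map
    print(LayerNorm2d(64)(x).shape)              # normalizes over C at each spatial position
    print(GroupNorm(64, num_groups=8)(x).shape)  # NOTE num_channels is the first arg here
    tokens = torch.randn(2, 196, 64)             # NLC token sequence
    print(RmsNorm(64)(tokens).shape)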
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/trace_utils.py
try: from torch import _assert except ImportError: def _assert(condition: bool, message: str): assert condition, message def _float_to_int(x: float) -> int: """ Symbolic tracing helper to substitute for inbuilt `int`. Hint: Inbuilt `int` can't accept an argument of type `Proxy` """ return int(x)
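# Minimal usage sketch; `_assert` behaves like a plain assert in eager mode but remains
# scriptable/traceable, and `_float_to_int` stands in for builtin `int()` under symbolic tracing.
if __name__ == '__main__':
    import torch
    t = torch.randn(4)
    _assert(t.ndim == 1, 'expected a 1d tensor')
    print(_float_to_int(3.7))  # -> 3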
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_norm_act.py
""" NormAct (Normalizaiton + Activation Layer) Factory Create norm + act combo modules that attempt to be backwards compatible with separate norm + act isntances in models. Where these are used it will be possible to swap separate BN + act layers with combined modules like IABN or EvoNorms. Hacked together by / Copyright 2020 Ross Wightman """ import types import functools from .evo_norm import * from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d from .inplace_abn import InplaceAbn _NORM_ACT_MAP = dict( batchnorm=BatchNormAct2d, batchnorm2d=BatchNormAct2d, groupnorm=GroupNormAct, groupnorm1=functools.partial(GroupNormAct, num_groups=1), layernorm=LayerNormAct, layernorm2d=LayerNormAct2d, evonormb0=EvoNorm2dB0, evonormb1=EvoNorm2dB1, evonormb2=EvoNorm2dB2, evonorms0=EvoNorm2dS0, evonorms0a=EvoNorm2dS0a, evonorms1=EvoNorm2dS1, evonorms1a=EvoNorm2dS1a, evonorms2=EvoNorm2dS2, evonorms2a=EvoNorm2dS2a, frn=FilterResponseNormAct2d, frntlu=FilterResponseNormTlu2d, inplaceabn=InplaceAbn, iabn=InplaceAbn, ) _NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} # has act_layer arg to define act type _NORM_ACT_REQUIRES_ARG = { BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): layer = get_norm_act_layer(layer_name, act_layer=act_layer) layer_instance = layer(num_features, apply_act=apply_act, **kwargs) if jit: layer_instance = torch.jit.script(layer_instance) return layer_instance def get_norm_act_layer(norm_layer, act_layer=None): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) norm_act_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_act_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower().split('-')[0] norm_act_layer = _NORM_ACT_MAP[layer_name] elif norm_layer in _NORM_ACT_TYPES: norm_act_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): # if function type, must be a lambda/fn that creates a norm_act layer norm_act_layer = norm_layer else: type_name = norm_layer.__name__.lower() if type_name.startswith('batchnorm'): norm_act_layer = BatchNormAct2d elif type_name.startswith('groupnorm'): norm_act_layer = GroupNormAct elif type_name.startswith('groupnorm1'): norm_act_layer = functools.partial(GroupNormAct, num_groups=1) elif type_name.startswith('layernorm2d'): norm_act_layer = LayerNormAct2d elif type_name.startswith('layernorm'): norm_act_layer = LayerNormAct else: assert False, f"No equivalent norm_act layer for {type_name}" if norm_act_layer in _NORM_ACT_REQUIRES_ARG: # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types norm_act_kwargs.setdefault('act_layer', act_layer) if norm_act_kwargs: norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args return norm_act_layer
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pos_embed_sincos.py
""" Sin-cos, fourier, rotary position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math from typing import List, Tuple, Optional, Union import torch from torch import nn as nn from .trace_utils import _assert def pixel_freq_bands( num_bands: int, max_freq: float = 224., linear_bands: bool = True, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ): if linear_bands: bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device) else: bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device) return bands * torch.pi def freq_bands( num_bands: int, temperature: float = 10000., step: int = 2, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ) -> torch.Tensor: bands = 1. / (temperature ** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands)) return bands def build_sincos2d_pos_embed( feat_shape: List[int], dim: int = 64, temperature: float = 10000., reverse_coord: bool = False, interleave_sin_cos: bool = False, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None ) -> torch.Tensor: """ Args: feat_shape: dim: temperature: reverse_coord: stack grid order W, H instead of H, W interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos dtype: device: Returns: """ assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' pos_dim = dim // 4 bands = freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device) if reverse_coord: feat_shape = feat_shape[::-1] # stack W, H instead of H, W grid = torch.stack(torch.meshgrid( [torch.arange(s, device=device, dtype=dtype) for s in feat_shape])).flatten(1).transpose(0, 1) pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) # FIXME add support for unflattened spatial dim? stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) return pos_emb def build_fourier_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, num_bands: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, include_grid: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Args: feat_shape: Feature shape for embedding. bands: Pre-calculated frequency bands. num_bands: Number of frequency bands (determines output dim). max_res: Maximum resolution for pixel based freq. temperature: Temperature for non-pixel freq. linear_bands: Linear band spacing for pixel based freq. include_grid: Include the spatial grid in output. in_pixels: Output in pixel freq. ref_feat_shape: Reference feature shape for resize / fine-tune. dtype: Output dtype. device: Output device. 
Returns: """ if bands is None: if in_pixels: bands = pixel_freq_bands( num_bands, float(max_res), linear_bands=linear_bands, dtype=dtype, device=device, ) else: bands = freq_bands( num_bands, temperature=temperature, step=1, dtype=dtype, device=device, ) else: if device is None: device = bands.device if dtype is None: dtype = bands.dtype if in_pixels: t = [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape] else: t = [torch.arange(s, device=device, dtype=dtype) for s in feat_shape] if ref_feat_shape is not None: # eva's scheme for resizing rope embeddings (ref shape = pretrain) t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)] grid = torch.stack(torch.meshgrid(t), dim=-1) grid = grid.unsqueeze(-1) pos = grid * bands pos_sin, pos_cos = pos.sin(), pos.cos() out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos] return out class FourierEmbed(nn.Module): def __init__( self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False, ): super().__init__() self.max_res = max_res self.num_bands = num_bands self.concat_grid = concat_grid self.keep_spatial = keep_spatial self.register_buffer( 'bands', pixel_freq_bands(max_res, num_bands), persistent=False, ) def forward(self, x): B, C = x.shape[:2] feat_shape = x.shape[2:] emb = build_fourier_pos_embed( feat_shape, self.bands, include_grid=self.concat_grid, dtype=x.dtype, device=x.device, ) emb = torch.cat(emb, dim=-1) emb = emb.transpose(-1, -2).flatten(len(feat_shape)) batch_expand = (B,) + (-1,) * (x.ndim - 1) # FIXME support nD if self.keep_spatial: x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) else: x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) x = x.reshape(B, feat_shape.numel(), -1) return x def rot(x): return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): if isinstance(x, torch.Tensor): x = [x] return [t * cos_emb + rot(t) * sin_emb for t in x] def apply_rot_embed_cat(x: torch.Tensor, emb): sin_emb, cos_emb = emb.tensor_split(2, -1) if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_keep_indices_nlc(x, pos_embed, keep_indices): pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1) pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1])) return pos_embed def build_rotary_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, dim: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ): """ Args: feat_shape: Spatial shape of the target tensor for embedding. bands: Optional pre-generated frequency bands dim: Output dimension of embedding tensor. max_res: Maximum resolution for pixel mode. temperature: Temperature (inv freq) for non-pixel mode linear_bands: Linearly (instead of log) spaced bands for pixel mode in_pixels: Pixel vs language (inv freq) mode. dtype: Output dtype. device: Output device. 
    Returns:
        Tuple of sin and cos position embedding tensors, each of shape (num_positions, dim).
    """
    sin_emb, cos_emb = build_fourier_pos_embed(
        feat_shape,
        bands=bands,
        num_bands=dim // 4,
        max_res=max_res,
        temperature=temperature,
        linear_bands=linear_bands,
        in_pixels=in_pixels,
        ref_feat_shape=ref_feat_shape,
        device=device,
        dtype=dtype,
    )
    num_spatial_dim = 1
    # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks
    for x in feat_shape:
        num_spatial_dim *= x
    sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    return sin_emb, cos_emb


class RotaryEmbedding(nn.Module):
    """ Rotary position embedding

    NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
    been well tested, and will likely change. It will be moved to its own file.

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache bands
            if in_pixels:
                bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            self.register_buffer(
                'bands',
                bands,
                persistent=False,
            )
            self.pos_embed_sin = None
            self.pos_embed_cos = None
        else:
            # cache full sin/cos embeddings if shape provided up front
            emb_sin, emb_cos = build_rotary_pos_embed(
                feat_shape=feat_shape,
                dim=dim,
                max_res=max_res,
                linear_bands=linear_bands,
                in_pixels=in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            self.bands = None
            self.register_buffer(
                'pos_embed_sin',
                emb_sin,
                persistent=False,
            )
            self.register_buffer(
                'pos_embed_cos',
                emb_cos,
                persistent=False,
            )

    def get_embed(self, shape: Optional[List[int]] = None):
        if self.bands is not None:
            # rebuild embeddings every call, use if target shape changes
            assert shape is not None
            return build_rotary_pos_embed(
                shape,
                self.bands,
                in_pixels=self.in_pixels,
            )
        else:
            return self.pos_embed_sin, self.pos_embed_cos

    def forward(self, x):
        # assuming channel-first tensor where spatial dim are >= 2
        sin_emb, cos_emb = self.get_embed(x.shape[2:])
        return apply_rot_embed(x, sin_emb, cos_emb)


class RotaryEmbeddingCat(nn.Module):
    """ Rotary position embedding w/ concatenated sin & cos

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache bands
            if in_pixels:
                bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            self.register_buffer(
                'bands',
                bands,
                persistent=False,
            )
            self.pos_embed = None
        else:
# cache full sin/cos embeddings if shape provided up front embeds = build_rotary_pos_embed( feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape, ) self.bands = None self.register_buffer( 'pos_embed', torch.cat(embeds, -1), persistent=False, ) def get_embed(self, shape: Optional[List[int]] = None): if self.bands is not None and shape is not None: # rebuild embeddings every call, use if target shape changes embeds = build_rotary_pos_embed( shape, self.bands, in_pixels=self.in_pixels, ref_feat_shape=self.ref_feat_shape, ) return torch.cat(embeds, -1) elif self.pos_embed is not None: return self.pos_embed else: assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands" def forward(self, x): # assuming channel-first tensor where spatial dim are >= 2 pos_embed = self.get_embed(x.shape[2:]) return apply_rot_embed_cat(x, pos_embed)
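# Minimal usage sketch for the concatenated rotary embedding; the (batch, heads, tokens, dim)
# query shape below is an illustrative assumption for how an attention impl might consume it.
if __name__ == '__main__':
    rope = RotaryEmbeddingCat(dim=32, feat_shape=[4, 4])  # pre-computes embed for a 4x4 grid
    q = torch.randn(2, 3, 16, 32)  # 16 tokens = 4 * 4 spatial positions
    q_rot = apply_rot_embed_cat(q, rope.get_embed())
    print(q_rot.shape)  # torch.Size([2, 3, 16, 32])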
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/bottleneck_attn.py
""" Bottleneck Self Attention (Bottleneck Transformers) Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 @misc{2101.11605, Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, Title = {Bottleneck Transformers for Visual Recognition}, Year = {2021}, } Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 This impl is a WIP but given that it is based on the ref gist likely not too far off. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, heads, height, width, dim) rel_k: (2 * width - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, 2 * W -1) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, W - 1]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) x = x_pad[:, :W, W - 1:] # reshape and tile x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, feat_size, dim_head, scale): super().__init__() self.height, self.width = to_2tuple(feat_size) self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) def forward(self, q): B, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(B, self.height, self.width, -1) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, HW, HW) return rel_logits class BottleneckAttn(nn.Module): """ Bottleneck Attention Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). num_heads (int): parallel attention heads (default: 4) dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool): add bias to q, k, and v projections scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): super().__init__() assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' dim_out = dim_out or dim assert dim_out % num_heads == 0 self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) # NOTE I'm only supporting relative pos embedding for now self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H == self.pos_embed.height, '') _assert(W == self.pos_embed.width, '') x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) if self.scale_pos_embed: attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W else: attn = (q @ k) * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W out = self.pool(out) return out
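# Minimal usage sketch; feat_size must match the input H, W exactly since the relative
# position embedding is sized to it. Shapes below are illustrative assumptions.
if __name__ == '__main__':
    attn = BottleneckAttn(dim=256, feat_size=(8, 8), num_heads=4)
    x = torch.randn(2, 256, 8, 8)
    print(attn(x).shape)  # torch.Size([2, 256, 8, 8])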
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/split_attn.py
""" Split Attention Conv2d (for ResNeSt Models) Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt Modified for torchscript compat, performance, and consistency with timm by Ross Wightman """ import torch import torch.nn.functional as F from torch import nn from .helpers import make_divisible class RadixSoftmax(nn.Module): def __init__(self, radix, cardinality): super(RadixSoftmax, self).__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttn(nn.Module): """Split-Attention (aka Splat) """ def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): super(SplitAttn, self).__init__() out_channels = out_channels or in_channels self.radix = radix mid_chs = out_channels * radix if rd_channels is None: attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) else: attn_chs = rd_channels * radix padding = kernel_size // 2 if padding is None else padding self.conv = nn.Conv2d( in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act0 = act_layer(inplace=True) self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() self.act1 = act_layer(inplace=True) self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) self.rsoftmax = RadixSoftmax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.drop(x) x = self.act0(x) B, RC, H, W = x.shape if self.radix > 1: x = x.reshape((B, self.radix, RC // self.radix, H, W)) x_gap = x.sum(dim=1) else: x_gap = x x_gap = x_gap.mean((2, 3), keepdim=True) x_gap = self.fc1(x_gap) x_gap = self.bn1(x_gap) x_gap = self.act1(x_gap) x_attn = self.fc2(x_gap) x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) if self.radix > 1: out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) else: out = x * x_attn return out.contiguous()
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/squeeze_excite.py
""" Squeeze-and-Excitation Channel Attention An SE implementation originally based on PyTorch SE-Net impl. Has since evolved with additional functionality / configuration. Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 Also included is Effective Squeeze-Excitation (ESE). Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn from .create_act import create_act_layer from .helpers import make_divisible class SEModule(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): super(SEModule, self).__init__() self.add_maxpool = add_maxpool if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc1(x_se) x_se = self.act(self.bn(x_se)) x_se = self.fc2(x_se) return x * self.gate(x_se) SqueezeExcite = SEModule # alias class EffectiveSEModule(nn.Module): """ 'Effective Squeeze-Excitation From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 """ def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): super(EffectiveSEModule, self).__init__() self.add_maxpool = add_maxpool self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc(x_se) return x * self.gate(x_se) EffectiveSqueezeExcite = EffectiveSEModule # alias class SqueezeExciteCl(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'): super().__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
        self.fc1 = nn.Linear(channels, rd_channels, bias=bias)
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Linear(rd_channels, channels, bias=bias)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((1, 2), keepdim=True)  # FIXME avg dim [1:n-1], don't assume 2D NHWC
        x_se = self.fc1(x_se)
        x_se = self.act(x_se)
        x_se = self.fc2(x_se)
        return x * self.gate(x_se)
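# Minimal usage sketch for the NCHW and channels-last variants; shapes are illustrative.
if __name__ == '__main__':
    import torch  # the module above only imports nn, so pull in torch for the demo tensors
    x = torch.randn(2, 64, 8, 8)
    print(SEModule(64)(x).shape)            # torch.Size([2, 64, 8, 8])
    print(EffectiveSEModule(64)(x).shape)   # torch.Size([2, 64, 8, 8])
    x_cl = torch.randn(2, 8, 8, 64)         # NHWC layout for the channels-last variant
    print(SqueezeExciteCl(64)(x_cl).shape)  # torch.Size([2, 8, 8, 64])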
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/cond_conv2d.py
""" PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 Ross Wightman """ import math from functools import partial import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): """CondConv initializer function.""" num_params = np.prod(expert_shape) if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params): raise (ValueError( 'CondConv variables must have shape [num_experts, num_params]')) for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): """ Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 """ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = to_2tuple(kernel_size) self.stride = to_2tuple(stride) padding_val, is_padding_dynamic = get_padding_value( padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer( partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer( partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): B, C, H, W = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # reshape instead of view to work with channels_last input x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, 
groups=self.groups * B) else: out = F.conv2d( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0) return out
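# Minimal usage sketch; routing weights are per-sample mixing coefficients over the experts,
# typically produced by a learned gating head (softmax here is an illustrative assumption).
if __name__ == '__main__':
    cc = CondConv2d(16, 32, kernel_size=3, num_experts=4)
    x = torch.randn(2, 16, 8, 8)
    routing = torch.softmax(torch.randn(2, 4), dim=-1)  # (batch, num_experts)
    print(cc(x, routing).shape)  # torch.Size([2, 32, 8, 8])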
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/cbam.py
""" CBAM (sort-of) Attention Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on some tasks, especially fine-grained it seems. I may end up removing this impl. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn import torch.nn.functional as F from .conv_bn_act import ConvNormAct from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible class ChannelAttn(nn.Module): """ Original CBAM channel attention module, currently avg + max pool variant only. """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(ChannelAttn, self).__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) self.act = act_layer(inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) return x * self.gate(x_avg + x_max) class LightChannelAttn(ChannelAttn): """An experimental 'lightweight' that sums avg + max pool first """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightChannelAttn, self).__init__( channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) def forward(self, x): x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) x_attn = self.fc2(self.act(self.fc1(x_pool))) return x * F.sigmoid(x_attn) class SpatialAttn(nn.Module): """ Original CBAM spatial attention module """ def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(SpatialAttn, self).__init__() self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class LightSpatialAttn(nn.Module): """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
""" def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(LightSpatialAttn, self).__init__() self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class CbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x class LightCbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightCbamModule, self).__init__() self.channel = LightChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = LightSpatialAttn(spatial_kernel_size) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/mixed_conv2d.py
""" PyTorch Mixed Convolution Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv2d_same import create_conv2d_pad def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split class MixedConv2d(nn.ModuleDict): """ Mixed Grouped Convolution Based on MDConv and GroupedConv in MixNet impl: https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = in_ch if depthwise else 1 # use add_module to keep key space clean self.add_module( str(idx), create_conv2d_pad( in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs) ) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [c(x_split[i]) for i, c in enumerate(self.values())] x = torch.cat(x_out, 1) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/format.py
from enum import Enum from typing import Union import torch class Format(str, Enum): NCHW = 'NCHW' NHWC = 'NHWC' NCL = 'NCL' NLC = 'NLC' FormatT = Union[str, Format] def get_spatial_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NLC: dim = (1,) elif fmt is Format.NCL: dim = (2,) elif fmt is Format.NHWC: dim = (1, 2) else: dim = (2, 3) return dim def get_channel_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NHWC: dim = 3 elif fmt is Format.NLC: dim = 2 else: dim = 1 return dim def nchw_to(x: torch.Tensor, fmt: Format): if fmt == Format.NHWC: x = x.permute(0, 2, 3, 1) elif fmt == Format.NLC: x = x.flatten(2).transpose(1, 2) elif fmt == Format.NCL: x = x.flatten(2) return x def nhwc_to(x: torch.Tensor, fmt: Format): if fmt == Format.NCHW: x = x.permute(0, 3, 1, 2) elif fmt == Format.NLC: x = x.flatten(1, 2) elif fmt == Format.NCL: x = x.flatten(1, 2).transpose(1, 2) return x
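# Minimal usage sketch of the layout helpers; shapes are illustrative assumptions.
if __name__ == '__main__':
    x = torch.randn(2, 32, 4, 4)  # NCHW
    print(nchw_to(x, Format.NLC).shape)   # torch.Size([2, 16, 32])
    print(nchw_to(x, Format.NHWC).shape)  # torch.Size([2, 4, 4, 32])
    print(get_channel_dim('NHWC'), get_spatial_dim('NHWC'))  # 3 (1, 2)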
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/separable_conv.py
""" Depthwise Separable Conv Modules Basic DWS convs. Other variations of DWS exist with batch norm or activations between the DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): """ Separable Conv w/ trailing Norm and Activation """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): """ Separable Conv """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/fast_norm.py
""" 'Fast' Normalization Functions For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32. Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast) Hacked together by / Copyright 2022 Ross Wightman """ from typing import List, Optional import torch from torch.nn import functional as F try: from apex.normalization.fused_layer_norm import fused_layer_norm_affine has_apex = True except ImportError: has_apex = False try: from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm has_apex_rmsnorm = True except ImportError: has_apex_rmsnorm = False # fast (ie lower precision LN) can be disabled with this flag if issues crop up _USE_FAST_NORM = False # defaulting to False for now def is_fast_norm(): return _USE_FAST_NORM def set_fast_norm(enable=True): global _USE_FAST_NORM _USE_FAST_NORM = enable def fast_group_norm( x: torch.Tensor, num_groups: int, weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.group_norm(x, num_groups, weight, bias, eps) if torch.is_autocast_enabled(): # normally native AMP casts GN inputs to float32 # here we use the low precision autocast dtype # FIXME what to do re CPU autocast? dt = torch.get_autocast_gpu_dtype() x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.group_norm(x, num_groups, weight, bias, eps) def fast_layer_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.layer_norm(x, normalized_shape, weight, bias, eps) if has_apex: return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) if torch.is_autocast_enabled(): # normally native AMP casts LN inputs to float32 # apex LN does not, this is behaving like Apex dt = torch.get_autocast_gpu_dtype() # FIXME what to do re CPU autocast? x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.layer_norm(x, normalized_shape, weight, bias, eps) def rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ): norm_ndim = len(normalized_shape) if torch.jit.is_scripting(): # ndim = len(x.shape) # dims = list(range(ndim - norm_ndim, ndim)) # this doesn't work on pytorch <= 1.13.x # NOTE -ve dims cause torchscript to crash in some cases, out of options to work around assert norm_ndim == 1 v = torch.var(x, dim=-1).unsqueeze(-1) # ts crashes with -ve dim + keepdim=True else: dims = tuple(range(-1, -norm_ndim - 1, -1)) v = torch.var(x, dim=dims, keepdim=True) x = x * torch.rsqrt(v + eps) if weight is not None: x = x * weight return x def fast_rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ) -> torch.Tensor: if torch.jit.is_scripting(): # this must be by itself, cannot merge with has_apex_rmsnorm return rms_norm(x, normalized_shape, weight, eps) if has_apex_rmsnorm: if weight is None: return fused_rms_norm(x, normalized_shape, eps) else: return fused_rms_norm_affine(x, weight, normalized_shape, eps) # fallback return rms_norm(x, normalized_shape, weight, eps)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/selective_kernel.py
""" Selective Kernel Convolution/Attention Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv_bn_act import ConvNormActAa from .helpers import make_divisible from .trace_utils import _assert def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert k >= 3 and k % 2 class SelectiveKernelAttn(nn.Module): def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): """ Selective Kernel Attention Module Selective Kernel attention mechanism factored out into its own module. """ super(SelectiveKernelAttn, self).__init__() self.num_paths = num_paths self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) self.bn = norm_layer(attn_channels) self.act = act_layer(inplace=True) self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) def forward(self, x): _assert(x.shape[1] == self.num_paths, '') x = x.sum(1).mean((2, 3), keepdim=True) x = self.fc_reduce(x) x = self.bn(x) x = self.act(x) x = self.fc_select(x) B, C, H, W = x.shape x = x.view(B, self.num_paths, C // self.num_paths, H, W) x = torch.softmax(x, dim=1) return x class SelectiveKernel(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): """ Selective Kernel Convolution Module As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. Largest change is the input split, which divides the input channels across each convolution path, this can be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps the parameter count from ballooning when the convolutions themselves don't have groups, but still provides a noteworthy increase in performance over similar param count models without this attention layer. -Ross W Args: in_channels (int): module input (feature) channel count out_channels (int): module output (feature) channel count kernel_size (int, list): kernel size for each convolution branch stride (int): stride for convolutions dilation (int): dilation for module as a whole, impacts dilation of each branch groups (int): number of groups for each branch rd_ratio (int, float): reduction factor for attention features keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, can be viewed as grouping by path, output expands to module out_channels count act_layer (nn.Module): activation layer to use norm_layer (nn.Module): batchnorm/norm layer to use aa_layer (nn.Module): anti-aliasing module drop_layer (nn.Module): spatial drop module in convs (drop block, etc) """ super(SelectiveKernel, self).__init__() out_channels = out_channels or in_channels kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation _kernel_valid(kernel_size) if not isinstance(kernel_size, list): kernel_size = [kernel_size] * 2 if keep_3x3: dilation = [dilation * (k - 1) // 2 for k in kernel_size] kernel_size = [3] * len(kernel_size) else: dilation = [dilation] * len(kernel_size) self.num_paths = len(kernel_size) self.in_channels = in_channels self.out_channels = out_channels self.split_input = split_input if self.split_input: assert in_channels % self.num_paths == 0 in_channels = in_channels // self.num_paths groups = min(out_channels, groups) conv_kwargs = dict( stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer) self.paths = nn.ModuleList([ ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for k, d in zip(kernel_size, dilation)]) attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) def forward(self, x): if self.split_input: x_split = torch.split(x, self.in_channels // self.num_paths, 1) x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] else: x_paths = [op(x) for op in self.paths] x = torch.stack(x_paths, dim=1) x_attn = self.attn(x) x = x * x_attn x = torch.sum(x, dim=1) return x
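# Minimal usage sketch; defaults build two 3x3 branches (dilation 1 and 2) with the input
# channels split across them. Shapes below are illustrative assumptions.
if __name__ == '__main__':
    sk = SelectiveKernel(64)
    x = torch.randn(2, 64, 8, 8)
    print(sk(x).shape)  # torch.Size([2, 64, 8, 8])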
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/patch_embed.py
""" Image to Patch Embedding using Conv2d A convolution based approach to patchifying a 2D image w/ embedding projection. Based on code in: * https://github.com/google-research/vision_transformer * https://github.com/google-research/big_vision/tree/main/big_vision Hacked together by / Copyright 2020 Ross Wightman """ import logging from typing import Callable, List, Optional, Tuple, Union import torch from torch import nn as nn import torch.nn.functional as F from .format import Format, nchw_to from .helpers import to_2tuple from .trace_utils import _assert _logger = logging.getLogger(__name__) class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ output_fmt: Format dynamic_img_pad: torch.jit.Final[bool] def __init__( self, img_size: Optional[int] = 224, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, norm_layer: Optional[Callable] = None, flatten: bool = True, output_fmt: Optional[str] = None, bias: bool = True, strict_img_size: bool = True, dynamic_img_pad: bool = False, ): super().__init__() self.patch_size = to_2tuple(patch_size) if img_size is not None: self.img_size = to_2tuple(img_size) self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size)]) self.num_patches = self.grid_size[0] * self.grid_size[1] else: self.img_size = None self.grid_size = None self.num_patches = None if output_fmt is not None: self.flatten = False self.output_fmt = Format(output_fmt) else: # flatten spatial dim and transpose to channels last, kept for bwd compat self.flatten = flatten self.output_fmt = Format.NCHW self.strict_img_size = strict_img_size self.dynamic_img_pad = dynamic_img_pad self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape if self.img_size is not None: if self.strict_img_size: _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).") elif not self.dynamic_img_pad: _assert( H % self.patch_size[0] == 0, f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})." ) _assert( W % self.patch_size[1] == 0, f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})." 
) if self.dynamic_img_pad: pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0] pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1] x = F.pad(x, (0, pad_w, 0, pad_h)) x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) # NCHW -> NLC elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return x class PatchEmbedWithSize(PatchEmbed): """ 2D Image to Patch Embedding """ output_fmt: Format def __init__( self, img_size: Optional[int] = 224, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, norm_layer: Optional[Callable] = None, flatten: bool = True, output_fmt: Optional[str] = None, bias: bool = True, ): super().__init__( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, flatten=flatten, output_fmt=output_fmt, bias=bias, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: B, C, H, W = x.shape if self.img_size is not None: _assert(H % self.patch_size[0] == 0, f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).") _assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).") x = self.proj(x) grid_size = x.shape[-2:] if self.flatten: x = x.flatten(2).transpose(1, 2) # NCHW -> NLC elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return x, grid_size def resample_patch_embed( patch_embed, new_size: List[int], interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): """Resample the weights of the patch embedding kernel to target resolution. We resample the patch embedding kernel by approximately inverting the effect of patch resizing. Code based on: https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py With this resizing, we can for example load a B/8 filter into a B/16 model and, on 2x larger input image, the result will match. Args: patch_embed: original parameter to be resized. new_size (tuple(int, int): target shape (height, width)-only. interpolation (str): interpolation for resize antialias (bool): use anti-aliasing filter in resize verbose (bool): log operation Returns: Resized patch embedding kernel. """ import numpy as np try: import functorch vmap = functorch.vmap except ImportError: if hasattr(torch, 'vmap'): vmap = torch.vmap else: assert False, "functorch or a version of torch with vmap is required for FlexiViT resizing." assert len(patch_embed.shape) == 4, "Four dimensions expected" assert len(new_size) == 2, "New shape should only be hw" old_size = patch_embed.shape[-2:] if tuple(old_size) == tuple(new_size): return patch_embed if verbose: _logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.") def resize(x_np, _new_size): x_tf = torch.Tensor(x_np)[None, None, ...] x_upsampled = F.interpolate( x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy() return x_upsampled def get_resize_mat(_old_size, _new_size): mat = [] for i in range(np.prod(_old_size)): basis_vec = np.zeros(_old_size) basis_vec[np.unravel_index(i, _old_size)] = 1. 
mat.append(resize(basis_vec, _new_size).reshape(-1)) return np.stack(mat).T resize_mat = get_resize_mat(old_size, new_size) resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device) def resample_kernel(kernel): resampled_kernel = resize_mat_pinv @ kernel.reshape(-1) return resampled_kernel.reshape(new_size) v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1) orig_dtype = patch_embed.dtype patch_embed = patch_embed.float() patch_embed = v_resample_kernel(patch_embed) patch_embed = patch_embed.to(orig_dtype) return patch_embed # def divs(n, m=None): # m = m or n // 2 # if m == 1: # return [1] # if n % m == 0: # return [m] + divs(n, m - 1) # return divs(n, m - 1) # # # class FlexiPatchEmbed(nn.Module): # """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT) # FIXME WIP # """ # def __init__( # self, # img_size=240, # patch_size=16, # in_chans=3, # embed_dim=768, # base_img_size=240, # base_patch_size=32, # norm_layer=None, # flatten=True, # bias=True, # ): # super().__init__() # self.img_size = to_2tuple(img_size) # self.patch_size = to_2tuple(patch_size) # self.num_patches = 0 # # # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48) # self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30) # # self.base_img_size = to_2tuple(base_img_size) # self.base_patch_size = to_2tuple(base_patch_size) # self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)]) # self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1] # # self.flatten = flatten # self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias) # self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() # # def forward(self, x): # B, C, H, W = x.shape # # if self.patch_size == self.base_patch_size: # weight = self.proj.weight # else: # weight = resample_patch_embed(self.proj.weight, self.patch_size) # patch_size = self.patch_size # x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size) # if self.flatten: # x = x.flatten(2).transpose(1, 2) # BCHW -> BNC # x = self.norm(x) # return x
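

# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hedged demonstration of PatchEmbed and resample_patch_embed; all shapes
# and patch sizes below are illustrative assumptions, not values from any checkpoint.
# Run as a module (e.g. `python -m timm.layers.patch_embed`) due to the relative imports.
if __name__ == '__main__':
    embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    x = torch.randn(2, 3, 224, 224)
    tokens = embed(x)
    print(tokens.shape)  # (2, 196, 768) with the default flatten=True

    # Resample the (768, 3, 16, 16) projection kernel down to 8x8 patches by
    # (pseudo-)inverting the patch resize, as described in the docstring above.
    new_kernel = resample_patch_embed(embed.proj.weight.detach(), [8, 8], verbose=True)
    print(new_kernel.shape)  # (768, 3, 8, 8)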
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_norm.py
""" Norm Layer Factory Create norm modules by string (to mirror create_act and creat_norm-act fns) Copyright 2022 Ross Wightman """ import functools import types from typing import Type import torch.nn as nn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from torchvision.ops.misc import FrozenBatchNorm2d _NORM_MAP = dict( batchnorm=nn.BatchNorm2d, batchnorm2d=nn.BatchNorm2d, batchnorm1d=nn.BatchNorm1d, groupnorm=GroupNorm, groupnorm1=GroupNorm1, layernorm=LayerNorm, layernorm2d=LayerNorm2d, rmsnorm=RmsNorm, frozenbatchnorm2d=FrozenBatchNorm2d, ) _NORM_TYPES = {m for n, m in _NORM_MAP.items()} def create_norm_layer(layer_name, num_features, **kwargs): layer = get_norm_layer(layer_name) layer_instance = layer(num_features, **kwargs) return layer_instance def get_norm_layer(norm_layer): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) norm_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '') norm_layer = _NORM_MAP[layer_name] else: norm_layer = norm_layer if norm_kwargs: norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args return norm_layer
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/padding.py
""" Padding Helpers Hacked together by / Copyright 2020 Ross Wightman """ import math from typing import List, Tuple import torch import torch.nn.functional as F # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): if isinstance(x, torch.Tensor): return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) else: return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) # Can SAME padding for given args be done statically? def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 def pad_same_arg( input_size: List[int], kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), ) -> List[int]: ih, iw = input_size kh, kw = kernel_size pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same( x, kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), value: float = 0, ): ih, iw = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == 'same': # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == 'valid': # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/global_context.py
""" Global Context Attention Block Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` - https://arxiv.org/abs/1904.11492 Official code consulted as reference: https://github.com/xvjiarui/GCNet Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): B, C, H, W = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: mlp_x = self.mlp_add(context) x = x + mlp_x return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/norm_act.py
""" Normalization + Activation Layers Provides Norm+Act fns for standard PyTorch norm layers such as * BatchNorm * GroupNorm * LayerNorm This allows swapping with alternative layers that are natively both norm + act such as * EvoNorm (evo_norm.py) * FilterResponseNorm (filter_response_norm.py) * InplaceABN (inplace_abn.py) Hacked together by / Copyright 2022 Ross Wightman """ from typing import Union, List, Optional, Any import torch from torch import nn as nn from torch.nn import functional as F from torchvision.ops.misc import FrozenBatchNorm2d from .create_act import get_act_layer from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm from .trace_utils import _assert def _create_act(act_layer, act_kwargs=None, inplace=False, apply_act=True): act_layer = get_act_layer(act_layer) # string -> nn.Module act_kwargs = act_kwargs or {} if act_layer is not None and apply_act: if inplace: act_kwargs['inplace'] = inplace act = act_layer(**act_kwargs) else: act = nn.Identity() return act class BatchNormAct2d(nn.BatchNorm2d): """BatchNorm + Activation This module performs BatchNorm + Activation in a manner that will remain backwards compatible with weights trained with separate bn, act. This is why we inherit from BN instead of composing it as a .bn member. """ def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, device=None, dtype=None, ): try: factory_kwargs = {'device': device, 'dtype': dtype} super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, **factory_kwargs, ) except TypeError: # NOTE for backwards compat with old PyTorch w/o factory device/dtype support super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, ) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def forward(self, x): # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') # exponential_average_factor is set to self.momentum # (when it is available) only so that it gets updated # in ONNX graph when this node is exported to ONNX. if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: # TODO: if statement only here to tell the jit to skip emitting this when it is None if self.num_batches_tracked is not None: # type: ignore[has-type] self.num_batches_tracked.add_(1) # type: ignore[has-type] if self.momentum is None: # use cumulative moving average exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: # use exponential moving average exponential_average_factor = self.momentum r""" Decide whether the mini-batch stats should be used for normalization rather than the buffers. Mini-batch stats are used in training mode, and in eval mode when buffers are None. """ if self.training: bn_training = True else: bn_training = (self.running_mean is None) and (self.running_var is None) r""" Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be passed when the update should occur (i.e. 
in training mode when they are tracked), or when buffer stats are used for normalization (i.e. in eval mode when buffers are not None). """ x = F.batch_norm( x, # If buffers are not to be tracked, ensure that they won't be updated self.running_mean if not self.training or self.track_running_stats else None, self.running_var if not self.training or self.track_running_stats else None, self.weight, self.bias, bn_training, exponential_average_factor, self.eps, ) x = self.drop(x) x = self.act(x) return x class SyncBatchNormAct(nn.SyncBatchNorm): # Thanks to Selim Seferbekov (https://github.com/rwightman/pytorch-image-models/issues/1254) # This is a quick workaround to support SyncBatchNorm for timm BatchNormAct2d layers # but ONLY when used in conjunction with the timm conversion function below. # Do not create this module directly or use the PyTorch conversion function. def forward(self, x: torch.Tensor) -> torch.Tensor: x = super().forward(x) # SyncBN doesn't work with torchscript anyways, so this is fine if hasattr(self, "drop"): x = self.drop(x) if hasattr(self, "act"): x = self.act(x) return x def convert_sync_batchnorm(module, process_group=None): # convert both BatchNorm and BatchNormAct layers to Synchronized variants module_output = module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): if isinstance(module, BatchNormAct2d): # convert timm norm + act layer module_output = SyncBatchNormAct( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group=process_group, ) # set act and drop attr from the original module module_output.act = module.act module_output.drop = module.drop else: # convert standard BatchNorm layers module_output = torch.nn.SyncBatchNorm( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group, ) if module.affine: with torch.no_grad(): module_output.weight = module.weight module_output.bias = module.bias module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = module.num_batches_tracked if hasattr(module, "qconfig"): module_output.qconfig = module.qconfig for name, child in module.named_children(): module_output.add_module(name, convert_sync_batchnorm(child, process_group)) del module return module_output class FrozenBatchNormAct2d(torch.nn.Module): """ BatchNormAct2d where the batch statistics and the affine parameters are fixed Args: num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)`` eps (float): a value added to the denominator for numerical stability. 
            Default: 1e-5
    """

    def __init__(
            self,
            num_features: int,
            eps: float = 1e-5,
            apply_act=True,
            act_layer=nn.ReLU,
            act_kwargs=None,
            inplace=True,
            drop_layer=None,
    ):
        super().__init__()
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)

    def _load_from_state_dict(
            self,
            state_dict: dict,
            prefix: str,
            local_metadata: dict,
            strict: bool,
            missing_keys: List[str],
            unexpected_keys: List[str],
            error_msgs: List[str],
    ):
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        x = x * scale + bias
        x = self.act(self.drop(x))
        return x

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps}, act={self.act})"


def freeze_batch_norm_2d(module):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct` layers
    of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively.

    Args:
        module (torch.nn.Module): Any PyTorch module.

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    res = module
    if isinstance(module, (BatchNormAct2d, SyncBatchNormAct)):
        res = FrozenBatchNormAct2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
        res.drop = module.drop
        res.act = module.act
    elif isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for name, child in module.named_children():
            new_child = freeze_batch_norm_2d(child)
            if new_child is not child:
                res.add_module(name, new_child)
    return res


def unfreeze_batch_norm_2d(module):
    """
    Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself an instance
    of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked
    recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module if isinstance(module, FrozenBatchNormAct2d): res = BatchNormAct2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, FrozenBatchNorm2d): res = torch.nn.BatchNorm2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = unfreeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def _num_groups(num_channels, num_groups, group_size): if group_size: assert num_channels % group_size == 0 return num_channels // group_size return num_groups class GroupNormAct(nn.GroupNorm): # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args def __init__( self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(GroupNormAct, self).__init__( _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine, ) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class GroupNorm1Act(nn.GroupNorm): def __init__( self, num_channels, eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(GroupNorm1Act, self).__init__(1, num_channels, eps=eps, affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct(nn.LayerNorm): def __init__( self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = self.drop(x) x 
= self.act(x) return x class LayerNormAct2d(nn.LayerNorm): def __init__( self, num_channels, eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) x = self.drop(x) x = self.act(x) return x
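

# --- Editorial usage sketch (not part of the original module) ---
# Hedged example of a fused norm + act layer and of freezing BN statistics;
# channel counts are illustrative. Run as a module (e.g.
# `python -m timm.layers.norm_act`) due to the relative imports.
if __name__ == '__main__':
    bn_act = BatchNormAct2d(16, act_layer=nn.ReLU)
    print(bn_act(torch.randn(2, 16, 8, 8)).shape)

    # freeze_batch_norm_2d converts BatchNormAct2d -> FrozenBatchNormAct2d in place
    model = nn.Sequential(nn.Conv2d(3, 16, 3), BatchNormAct2d(16))
    print(freeze_batch_norm_2d(model))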
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/attention_pool.py
from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from .config import use_fused_attn from .mlp import Mlp from .weight_init import trunc_normal_tf_ class AttentionPoolLatent(nn.Module): """ Attention pooling w/ latent query """ fused_attn: torch.jit.Final[bool] def __init__( self, in_features: int, out_features: int = None, embed_dim: int = None, num_heads: int = 8, mlp_ratio: float = 4.0, qkv_bias: bool = True, qk_norm: bool = False, latent_len: int = 1, latent_dim: int = None, pos_embed: str = '', pool_type: str = 'token', norm_layer: Optional[nn.Module] = None, drop: float = 0.0, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features assert embed_dim % num_heads == 0 self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 self.pool = pool_type self.fused_attn = use_fused_attn() if pos_embed == 'abs': spatial_len = self.feat_size self.pos_embed = nn.Parameter(torch.zeros(spatial_len, in_features)) else: self.pos_embed = None self.latent_dim = latent_dim or embed_dim self.latent_len = latent_len self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim)) self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.proj = nn.Linear(embed_dim, embed_dim) self.proj_drop = nn.Dropout(drop) self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity() self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio)) self.init_weights() def init_weights(self): if self.pos_embed is not None: trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5) trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5) def forward(self, x): B, N, C = x.shape if self.pos_embed is not None: # FIXME interpolate x = x + self.pos_embed.unsqueeze(0).to(x.dtype) q_latent = self.latent.expand(B, -1, -1) q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2) kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) q, k = self.q_norm(q), self.k_norm(k) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, self.latent_len, C) x = self.proj(x) x = self.proj_drop(x) x = x + self.mlp(self.norm(x)) # optional pool if latent seq_len > 1 and pooled output is desired if self.pool == 'token': x = x[:, 0] elif self.pool == 'avg': x = x.mean(1) return x
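

# --- Editorial usage sketch (not part of the original module) ---
# Hedged example of latent-query attention pooling over a token sequence; dims are
# illustrative. The default pos_embed='' is used here since the 'abs' branch above
# expects a feat_size that this minimal sketch does not provide. Run as a module
# (e.g. `python -m timm.layers.attention_pool`) due to the relative imports.
if __name__ == '__main__':
    pool = AttentionPoolLatent(in_features=384, num_heads=6)
    tokens = torch.randn(2, 196, 384)
    print(pool(tokens).shape)  # (2, 384) with pool_type='token' and latent_len=1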
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pos_embed_rel.py
""" Relative position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math import os from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from .interpolate import RegularGridInterpolator from .mlp import Mlp from .weight_init import trunc_normal_ _USE_SCIPY = int(os.environ.get('TIMM_USE_SCIPY_INTERP', 0)) > 0 def gen_relative_position_index( q_size: Tuple[int, int], k_size: Optional[Tuple[int, int]] = None, class_token: bool = False, ) -> torch.Tensor: # Adapted with significant modifications from Swin / BeiT codebases # get pair-wise relative position index for each token inside the window assert k_size is None, 'Different q & k sizes not currently supported' # FIXME coords = torch.stack( torch.meshgrid([ torch.arange(q_size[0]), torch.arange(q_size[1]) ]) ).flatten(1) # 2, Wh, Ww relative_coords = coords[:, :, None] - coords[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 relative_coords[:, :, 0] += q_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += q_size[1] - 1 relative_coords[:, :, 0] *= 2 * q_size[1] - 1 num_relative_distance = (2 * q_size[0] - 1) * (2 * q_size[1] - 1) # else: # # FIXME different q vs k sizes is a WIP, need to better offset the two grids? # q_coords = torch.stack( # torch.meshgrid([ # torch.arange(q_size[0]), # torch.arange(q_size[1]) # ]) # ).flatten(1) # 2, Wh, Ww # k_coords = torch.stack( # torch.meshgrid([ # torch.arange(k_size[0]), # torch.arange(k_size[1]) # ]) # ).flatten(1) # relative_coords = q_coords[:, :, None] - k_coords[:, None, :] # 2, Wh*Ww, Wh*Ww # relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 # relative_coords[:, :, 0] += max(q_size[0], k_size[0]) - 1 # shift to start from 0 # relative_coords[:, :, 1] += max(q_size[1], k_size[1]) - 1 # relative_coords[:, :, 0] *= k_size[1] + q_size[1] - 1 # relative_position_index = relative_coords.sum(-1) # Qh*Qw, Kh*Kw # num_relative_distance = (q_size[0] + k_size[0] - 1) * (q_size[1] + k_size[1] - 1) + 3 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww if class_token: # handle cls to token & token 2 cls & cls to cls as per beit for rel pos bias # NOTE not intended or tested with MLP log-coords relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0]) relative_position_index[0, 0:] = num_relative_distance relative_position_index[0:, 0] = num_relative_distance + 1 relative_position_index[0, 0] = num_relative_distance + 2 return relative_position_index.contiguous() def resize_rel_pos_bias_table_simple( rel_pos_bias, new_window_size: Tuple[int, int], new_bias_shape: Tuple[int, ...], ): dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: # TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported _, dst_h, dst_w = new_bias_shape num_attn_heads, src_h, src_w = rel_pos_bias.shape assert dst_h == dst_size[0] and dst_w == dst_size[1] if src_h != dst_h or src_w != dst_w: rel_pos_bias = torch.nn.functional.interpolate( rel_pos_bias.unsqueeze(0), size=dst_size, mode="bicubic", align_corners=False, ).squeeze(0) else: assert rel_pos_bias.ndim == 2 # (num_pos, num_heads) (aka flat) bias shape dst_num_pos, _ = new_bias_shape src_num_pos, num_attn_heads = rel_pos_bias.shape num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1]) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) # FIXME could support non-equal src 
if argument passed

        if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]:
            if num_extra_tokens:
                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
            else:
                extra_tokens = None
            rel_pos_bias = torch.nn.functional.interpolate(
                rel_pos_bias.transpose(1, 0).reshape((1, -1, src_size[0], src_size[1])),
                size=dst_size,
                mode="bicubic",
                align_corners=False,
            ).view(-1, dst_num_pos - num_extra_tokens).transpose(0, 1)
            if extra_tokens is not None:
                rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
    return rel_pos_bias


def resize_rel_pos_bias_table_levit(
        position_bias_table,
        new_size,
        interpolation: str = 'bicubic',
        antialias: bool = True,
):
    """
    Resample relative position bias table suggested in LeVit
    Adapted from: https://github.com/microsoft/Cream/blob/main/TinyViT/utils.py
    """
    L1, nH1 = position_bias_table.size()
    L2, nH2 = new_size
    assert nH1 == nH2
    if L1 != L2:
        orig_dtype = position_bias_table.dtype
        position_bias_table = position_bias_table.float()
        # bicubic interpolate relative_position_bias_table if not match
        S1 = int(L1 ** 0.5)
        S2 = int(L2 ** 0.5)
        relative_position_bias_table_resized = F.interpolate(
            position_bias_table.permute(1, 0).view(1, nH1, S1, S1),
            size=(S2, S2),
            mode=interpolation,
            antialias=antialias)
        relative_position_bias_table_resized = \
            relative_position_bias_table_resized.view(nH2, L2).permute(1, 0)
        # NOTE .to() is not in-place, the cast result must be assigned
        relative_position_bias_table_resized = relative_position_bias_table_resized.to(orig_dtype)
        return relative_position_bias_table_resized
    else:
        return position_bias_table


def resize_rel_pos_bias_table(
        rel_pos_bias,
        new_window_size: Tuple[int, int],
        new_bias_shape: Tuple[int, ...],
):
    """ Resize relative position bias table using more advanced interpolation.

    Modified from code in Microsoft Unilm (https://github.com/microsoft/unilm) repo (BeiT, BeiT-v2, etc).
https://github.com/microsoft/unilm/blob/5255d52de86dad642810f5849dd357769346c1d7/beit/run_class_finetuning.py#L351 Args: rel_pos_bias: new_window_size: new_bias_shape: Returns: """ if _USE_SCIPY: from scipy import interpolate dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: # TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported num_extra_tokens = 0 _, dst_h, dst_w = new_bias_shape assert dst_h == dst_size[0] and dst_w == dst_size[1] num_attn_heads, src_h, src_w = rel_pos_bias.shape src_size = (src_h, src_w) has_flat_shape = False else: assert rel_pos_bias.ndim == 2 # (num_pos, num_heads) (aka flat) bias shape dst_num_pos, _ = new_bias_shape src_num_pos, num_attn_heads = rel_pos_bias.shape num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1]) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) has_flat_shape = True if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]: # print("Interpolating position from %dx%d to %dx%d" % (src_size[0], src_size[1], dst_size[0], dst_size[1])) if num_extra_tokens: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] else: extra_tokens = None def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) def _calc(src, dst): left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src // 2) if gp > dst // 2: right = q else: left = q dis = [] cur = 1 for i in range(src // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] return r_ids + [0] + dis y = _calc(src_size[0], dst_size[0]) x = _calc(src_size[1], dst_size[1]) yx = [torch.tensor(y), torch.tensor(x)] # print("Original positions = %s" % str(x)) ty = dst_size[0] // 2.0 tx = dst_size[1] // 2.0 dy = torch.arange(-ty, ty + 0.1, 1.0) dx = torch.arange(-tx, tx + 0.1, 1.0) dyx = torch.meshgrid([dy, dx]) # print("Target positions = %s" % str(dx)) all_rel_pos_bias = [] for i in range(num_attn_heads): if has_flat_shape: z = rel_pos_bias[:, i].view(src_size[0], src_size[1]).float() else: z = rel_pos_bias[i, :, :].float() if _USE_SCIPY: # Original beit code uses scipy w/ cubic interpolation f = interpolate.interp2d(x, y, z.numpy(), kind='cubic') r = torch.Tensor(f(dx, dy)).contiguous().to(rel_pos_bias.device) else: # Without scipy dependency, I've found a reasonably simple impl # that supports uneven spaced interpolation pts with 'linear' interp. # Results are comparable to scipy for model accuracy in most cases. f = RegularGridInterpolator(yx, z) r = f(dyx).contiguous().to(rel_pos_bias.device) if has_flat_shape: r = r.view(-1, 1) all_rel_pos_bias.append(r) if has_flat_shape: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) else: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=0) if extra_tokens is not None: assert has_flat_shape rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) return rel_pos_bias class RelPosBias(nn.Module): """ Relative Position Bias Adapted from Swin-V1 relative position bias impl, modularized. 
""" def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,) num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) self.register_buffer( "relative_position_index", gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0).view(-1), persistent=False, ) self.init_weights() def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[self.relative_position_index] # win_h * win_w, win_h * win_w, num_heads relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias() def gen_relative_log_coords( win_size: Tuple[int, int], pretrained_win_size: Tuple[int, int] = (0, 0), mode='swin', ): assert mode in ('swin', 'cr') # as per official swin-v2 impl, supporting timm specific 'cr' log coords as well relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0], dtype=torch.float32) relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1], dtype=torch.float32) relative_coords_table = torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w])) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() # 2*Wh-1, 2*Ww-1, 2 if mode == 'swin': if pretrained_win_size[0] > 0: relative_coords_table[:, :, 0] /= (pretrained_win_size[0] - 1) relative_coords_table[:, :, 1] /= (pretrained_win_size[1] - 1) else: relative_coords_table[:, :, 0] /= (win_size[0] - 1) relative_coords_table[:, :, 1] /= (win_size[1] - 1) relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2( 1.0 + relative_coords_table.abs()) / math.log2(8) else: # mode == 'cr' relative_coords_table = torch.sign(relative_coords_table) * torch.log( 1.0 + relative_coords_table.abs()) return relative_coords_table class RelPosMlp(nn.Module): """ Log-Coordinate Relative Position MLP Based on ideas presented in Swin-V2 paper (https://arxiv.org/abs/2111.09883) This impl covers the 'swin' implementation as well as two timm specific modes ('cr', and 'rw') """ def __init__( self, window_size, num_heads=8, hidden_dim=128, prefix_tokens=0, mode='cr', pretrained_window_size=(0, 0) ): super().__init__() self.window_size = window_size self.window_area = self.window_size[0] * self.window_size[1] self.prefix_tokens = prefix_tokens self.num_heads = num_heads self.bias_shape = (self.window_area,) * 2 + (num_heads,) if mode == 'swin': self.bias_act = nn.Sigmoid() self.bias_gain = 16 mlp_bias = (True, False) else: self.bias_act = nn.Identity() self.bias_gain = None mlp_bias = True self.mlp = Mlp( 2, # x, y hidden_features=hidden_dim, out_features=num_heads, act_layer=nn.ReLU, bias=mlp_bias, drop=(0.125, 0.) 
        )

        self.register_buffer(
            "relative_position_index",
            gen_relative_position_index(window_size).view(-1),
            persistent=False)

        # get relative_coords_table
        self.register_buffer(
            "rel_coords_log",
            gen_relative_log_coords(window_size, pretrained_window_size, mode=mode),
            persistent=False)

    def get_bias(self) -> torch.Tensor:
        relative_position_bias = self.mlp(self.rel_coords_log)
        if self.relative_position_index is not None:
            relative_position_bias = relative_position_bias.view(-1, self.num_heads)[self.relative_position_index]
            relative_position_bias = relative_position_bias.view(self.bias_shape)
        relative_position_bias = relative_position_bias.permute(2, 0, 1)
        relative_position_bias = self.bias_act(relative_position_bias)
        if self.bias_gain is not None:
            relative_position_bias = self.bias_gain * relative_position_bias
        if self.prefix_tokens:
            relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0])
        return relative_position_bias.unsqueeze(0).contiguous()

    def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):
        return attn + self.get_bias()


def generate_lookup_tensor(
        length: int,
        max_relative_position: Optional[int] = None,
):
    """Generate a one_hot lookup tensor to reindex embeddings along one dimension.

    Args:
        length: the length to reindex to.
        max_relative_position: the maximum relative position to consider.
            Relative position embeddings for distances above this threshold
            are zeroed out.
    Returns:
        a lookup Tensor of size [length, length, vocab_size] that satisfies
            ret[n,m,v] = 1{m - n + max_relative_position = v}.
    """
    if max_relative_position is None:
        max_relative_position = length - 1
    # Build the one-hot lookup tensor (no caching is done here; callers typically register it as a buffer).
    vocab_size = 2 * max_relative_position + 1
    ret = torch.zeros(length, length, vocab_size)
    for i in range(length):
        for x in range(length):
            v = x - i + max_relative_position
            if abs(x - i) > max_relative_position:
                continue
            ret[i, x, v] = 1
    return ret


def reindex_2d_einsum_lookup(
        relative_position_tensor,
        height: int,
        width: int,
        height_lookup: torch.Tensor,
        width_lookup: torch.Tensor,
) -> torch.Tensor:
    """Reindex 2d relative position bias with 2 independent einsum lookups.

    Adapted from:
     https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py

    Args:
        relative_position_tensor: tensor of shape
            [..., vocab_height, vocab_width, ...].
        height: height to reindex to.
        width: width to reindex to.
        height_lookup: one-hot height lookup
        width_lookup: one-hot width lookup
    Returns:
        reindexed_tensor: a Tensor of shape
            [..., height * width, height * width, ...]
""" reindexed_tensor = torch.einsum('nhw,ixh->nixw', relative_position_tensor, height_lookup) reindexed_tensor = torch.einsum('nixw,jyw->nijxy', reindexed_tensor, width_lookup) area = height * width return reindexed_tensor.reshape(relative_position_tensor.shape[0], area, area) class RelPosBiasTf(nn.Module): """ Relative Position Bias Impl (Compatible with Tensorflow MaxViT models) Adapted from: https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py """ def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.num_heads = num_heads vocab_height = 2 * window_size[0] - 1 vocab_width = 2 * window_size[1] - 1 self.bias_shape = (self.num_heads, vocab_height, vocab_width) self.relative_position_bias_table = nn.Parameter(torch.zeros(self.bias_shape)) self.register_buffer('height_lookup', generate_lookup_tensor(window_size[0]), persistent=False) self.register_buffer('width_lookup', generate_lookup_tensor(window_size[1]), persistent=False) self.init_weights() def init_weights(self): nn.init.normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: # FIXME change to not use one-hot/einsum? return reindex_2d_einsum_lookup( self.relative_position_bias_table, self.window_size[0], self.window_size[1], self.height_lookup, self.width_lookup ) def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias()
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/asymmetric_loss.py
import torch
import torch.nn as nn


class AsymmetricLossMultiLabel(nn.Module):
    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
        super(AsymmetricLossMultiLabel, self).__init__()

        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps

    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """

        # Calculating Probabilities
        x_sigmoid = torch.sigmoid(x)
        xs_pos = x_sigmoid
        xs_neg = 1 - x_sigmoid

        # Asymmetric Clipping
        if self.clip is not None and self.clip > 0:
            xs_neg = (xs_neg + self.clip).clamp(max=1)

        # Basic CE calculation
        los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
        los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
        loss = los_pos + los_neg

        # Asymmetric Focusing
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                torch.set_grad_enabled(False)
            pt0 = xs_pos * y
            pt1 = xs_neg * (1 - y)  # pt = p if t > 0 else 1-p
            pt = pt0 + pt1
            one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
            one_sided_w = torch.pow(1 - pt, one_sided_gamma)
            if self.disable_torch_grad_focal_loss:
                torch.set_grad_enabled(True)
            loss *= one_sided_w

        return -loss.sum()


class AsymmetricLossSingleLabel(nn.Module):
    def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'):
        super(AsymmetricLossSingleLabel, self).__init__()

        self.eps = eps
        self.logsoftmax = nn.LogSoftmax(dim=-1)
        self.targets_classes = []  # prevent gpu repeated memory allocation
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
        self.reduction = reduction

    def forward(self, inputs, target, reduction=None):
        """
        Parameters
        ----------
        inputs: input logits
        target: targets (1-hot vector)
        """

        num_classes = inputs.size()[-1]
        log_preds = self.logsoftmax(inputs)
        self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)

        # ASL weights
        targets = self.targets_classes
        anti_targets = 1 - targets
        xs_pos = torch.exp(log_preds)
        xs_neg = 1 - xs_pos
        xs_pos = xs_pos * targets
        xs_neg = xs_neg * anti_targets
        asymmetric_w = torch.pow(
            1 - xs_pos - xs_neg, self.gamma_pos * targets + self.gamma_neg * anti_targets)
        log_preds = log_preds * asymmetric_w

        if self.eps > 0:  # label smoothing
            self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)

        # loss calculation
        loss = - self.targets_classes.mul(log_preds)

        loss = loss.sum(dim=-1)
        if self.reduction == 'mean':
            loss = loss.mean()

        return loss
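

# --- Editorial usage sketch (not part of the original module) ---
# Hedged example of the multi-label asymmetric loss on random logits and binarized
# targets; batch and class counts are illustrative assumptions.
if __name__ == '__main__':
    criterion = AsymmetricLossMultiLabel(gamma_neg=4, gamma_pos=1, clip=0.05)
    logits = torch.randn(8, 20)
    targets = torch.randint(0, 2, (8, 20)).float()
    print(criterion(logits, targets))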
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/jsd.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from .cross_entropy import LabelSmoothingCrossEntropy


class JsdCrossEntropy(nn.Module):
    """ Jensen-Shannon Divergence + Cross-Entropy Loss

    Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
    https://arxiv.org/abs/1912.02781

    Hacked together by / Copyright 2020 Ross Wightman
    """
    def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
        super().__init__()
        self.num_splits = num_splits
        self.alpha = alpha
        if smoothing is not None and smoothing > 0:
            self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
        else:
            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()

    def __call__(self, output, target):
        split_size = output.shape[0] // self.num_splits
        assert split_size * self.num_splits == output.shape[0]
        logits_split = torch.split(output, split_size)

        # Cross-entropy is only computed on clean images
        loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
        probs = [F.softmax(logits, dim=1) for logits in logits_split]

        # Clamp mixture distribution to avoid exploding KL divergence
        logp_mixture = torch.clamp(torch.stack(probs).mean(dim=0), 1e-7, 1).log()
        loss += self.alpha * sum([F.kl_div(
            logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
        return loss
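

# --- Editorial usage sketch (not part of the original module) ---
# Hedged example: the batch is assumed to be [clean, aug1, aug2] splits, as in
# AugMix training; sizes are illustrative. Run as a module
# (e.g. `python -m timm.loss.jsd`) due to the relative import above.
if __name__ == '__main__':
    criterion = JsdCrossEntropy(num_splits=3, alpha=12, smoothing=0.1)
    output = torch.randn(24, 10)          # 3 splits of 8 samples each
    target = torch.randint(0, 10, (8,))   # labels for the clean split only
    print(criterion(output, target))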
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/__init__.py
from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel from .binary_cross_entropy import BinaryCrossEntropy from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from .jsd import JsdCrossEntropy
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/cross_entropy.py
""" Cross Entropy w/ smoothing or soft targets Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F class LabelSmoothingCrossEntropy(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.1): super(LabelSmoothingCrossEntropy, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1. - smoothing def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() class SoftTargetCrossEntropy(nn.Module): def __init__(self): super(SoftTargetCrossEntropy, self).__init__() def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) return loss.mean()
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/binary_cross_entropy.py
""" Binary Cross Entropy w/ a few extras Hacked together by / Copyright 2021 Ross Wightman """ from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F class BinaryCrossEntropy(nn.Module): """ BCE with optional one-hot from dense targets, label smoothing, thresholding NOTE for experiments comparing CE to BCE /w label smoothing, may remove """ def __init__( self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, reduction: str = 'mean', sum_classes: bool = False, pos_weight: Optional[Union[torch.Tensor, float]] = None, ): super(BinaryCrossEntropy, self).__init__() assert 0. <= smoothing < 1.0 if pos_weight is not None: if not isinstance(pos_weight, torch.Tensor): pos_weight = torch.tensor(pos_weight) self.smoothing = smoothing self.target_threshold = target_threshold self.reduction = 'none' if sum_classes else reduction self.sum_classes = sum_classes self.register_buffer('weight', weight) self.register_buffer('pos_weight', pos_weight) def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: batch_size = x.shape[0] assert batch_size == target.shape[0] if target.shape != x.shape: # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse num_classes = x.shape[-1] # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ off_value = self.smoothing / num_classes on_value = 1. - self.smoothing + off_value target = target.long().view(-1, 1) target = torch.full( (batch_size, num_classes), off_value, device=x.device, dtype=x.dtype).scatter_(1, target, on_value) if self.target_threshold is not None: # Make target 0, or 1 if threshold set target = target.gt(self.target_threshold).to(dtype=target.dtype) loss = F.binary_cross_entropy_with_logits( x, target, self.weight, pos_weight=self.pos_weight, reduction=self.reduction, ) if self.sum_classes: loss = loss.sum(-1).mean() return loss
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/convert/convert_nest_flax.py
""" Convert weights from https://github.com/google-research/nested-transformer NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt """ import sys import numpy as np import torch from clu import checkpoint arch_depths = { 'nest_base': [2, 2, 20], 'nest_small': [2, 2, 20], 'nest_tiny': [2, 2, 8], } def convert_nest(checkpoint_path, arch): """ Expects path to checkpoint which is a dir containing 4 files like in each of these folders - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints `arch` is needed to Returns a state dict that can be used with `torch.nn.Module.load_state_dict` Hint: Follow timm.models.nest.Nest.__init__ and https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py """ assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported" flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target'] state_dict = {} # Patch embedding state_dict['patch_embed.proj.weight'] = torch.tensor( flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1) state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias']) # Positional embeddings posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')] for i, k in enumerate(posemb_keys): state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding']) # Transformer encoders depths = arch_depths[arch] for level in range(len(depths)): for layer in range(depths[level]): global_layer_ix = sum(depths[:level]) + layer # Norms for i in range(2): state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor( flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale']) state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor( flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias']) # Attention qkv w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel'] w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel'] # Pay attention to dims here (maybe get pen and paper) w_kv = np.concatenate(np.split(w_kv, 2, -1), 1) w_qkv = np.concatenate([w_q, w_kv], 1) state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0) b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias'] b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias'] # Pay attention to dims here (maybe get pen and paper) b_kv = np.concatenate(np.split(b_kv, 2, -1), 0) b_qkv = np.concatenate([b_q, b_kv], 0) state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1) # Attention proj w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel'] w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1) state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor( flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias']) # MLP for i in range(2): state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor( flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0) 
state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor( flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias']) # Block aggregations (ConvPool) for level in range(1, len(depths)): # Convs state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor( flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1) state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor( flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias']) # Norms state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor( flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale']) state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor( flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias']) # Final norm state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale']) state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias']) # Classifier state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0) state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias']) return state_dict if __name__ == '__main__': variant = sys.argv[1] # base, small, or tiny state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}') torch.save(state_dict, f'./jx_nest_{variant}.pth')
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/convert/convert_from_mxnet.py
import argparse
import hashlib
import os

import mxnet as mx
import gluoncv
import torch
from timm import create_model

parser = argparse.ArgumentParser(description='Convert from MXNet')
parser.add_argument('--model', default='all', type=str, metavar='MODEL',
                    help='Name of model to train (default: "all")')


def convert(mxnet_name, torch_name):
    # download and load the pre-trained model
    net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True)

    # create corresponding torch model
    torch_net = create_model(torch_name)

    mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k]
    torchp = list(torch_net.named_parameters())
    torch_params = {}

    # convert parameters
    # NOTE: we are relying on the fact that the order of parameters
    # are usually exactly the same between these models, thus no key name mapping
    # is necessary. Asserts will trip if this is not the case.
    for (tn, tv), (mn, mv) in zip(torchp, mxp):
        m_split = mn.split('_')
        t_split = tn.split('.')
        print(t_split, m_split)
        print(tv.shape, mv.shape)

        # ensure ordering of BN params match since their sizes are not specific
        if m_split[-1] == 'gamma':
            assert t_split[-1] == 'weight'
        if m_split[-1] == 'beta':
            assert t_split[-1] == 'bias'

        # ensure shapes match
        assert all(t == m for t, m in zip(tv.shape, mv.shape))

        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    # convert buffers (batch norm running stats)
    mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])]
    torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k]
    for (tn, tv), (mn, mv) in zip(torchb, mxb):
        print(tn, mn)
        print(tv.shape, mv.shape)

        # ensure ordering of BN params match since their sizes are not specific
        if 'running_var' in tn:
            assert 'running_var' in mn
        if 'running_mean' in tn:
            assert 'running_mean' in mn

        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    torch_net.load_state_dict(torch_params)
    torch_filename = './%s.pth' % torch_name
    torch.save(torch_net.state_dict(), torch_filename)
    with open(torch_filename, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth'
    os.rename(torch_filename, final_filename)
    print("=> Saved converted model to '{}', SHA256: {}".format(final_filename, sha_hash))


def map_mx_to_torch_model(mx_name):
    torch_name = mx_name.lower()
    if torch_name.startswith('se_'):
        torch_name = torch_name.replace('se_', 'se')
    elif torch_name.startswith('senet_'):
        torch_name = torch_name.replace('senet_', 'senet')
    elif torch_name.startswith('inceptionv3'):
        torch_name = torch_name.replace('inceptionv3', 'inception_v3')
    torch_name = 'gluon_' + torch_name
    return torch_name


ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b',
       'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c', 'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d',
       #'resnet50_v1e', 'resnet101_v1e', 'resnet152_v1e',
       'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
       'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d', 'senet_154', 'inceptionv3']


def main():
    args = parser.parse_args()
    if not args.model or args.model == 'all':
        for mx_model in ALL:
            torch_model = map_mx_to_torch_model(mx_model)
            convert(mx_model, torch_model)
    else:
        mx_model = args.model
        torch_model = map_mx_to_torch_model(mx_model)
        convert(mx_model, torch_model)


if __name__ == '__main__':
    main()
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/tests/test_models.py
"""Run tests for all models Tests that run on CI should have a specific marker, e.g. @pytest.mark.base. This marker is used to parallelize the CI runs, with one runner for each marker. If new tests are added, ensure that they use one of the existing markers (documented in pyproject.toml > pytest > markers) or that a new marker is added for this set of tests. If using a new marker, adjust the test matrix in .github/workflows/tests.yml to run tests with this new marker, otherwise the tests will be skipped on CI. """ import pytest import torch import platform import os import fnmatch _IS_MAC = platform.system() == 'Darwin' try: from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names, NodePathTracer has_fx_feature_extraction = True except ImportError: has_fx_feature_extraction = False import timm from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value from timm.layers import Format, get_spatial_dim, get_channel_dim from timm.models import get_notrace_modules, get_notrace_functions import importlib import os torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cpu') timeout = os.environ.get('TIMEOUT') timeout120 = int(timeout) if timeout else 120 timeout300 = int(timeout) if timeout else 300 if hasattr(torch._C, '_jit_set_profiling_executor'): # legacy executor is too slow to compile large models for unit tests # no need for the fusion performance here torch._C._jit_set_profiling_executor(True) torch._C._jit_set_profiling_mode(False) # transformer models don't support many of the spatial / feature based model functionalities NON_STD_FILTERS = [ 'vit_*', 'tnt_*', 'pit_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*', 'convit_*', 'levit*', 'visformer*', 'deit*', 'jx_nest_*', 'nest_*', 'xcit_*', 'crossvit_*', 'beit*', 'poolformer_*', 'volo_*', 'sequencer2d_*', 'pvt_v2*', 'mvitv2*', 'gcvit*', 'efficientformer*', 'eva_*', 'flexivit*', 'eva02*', 'samvit_*', 'efficientvit_m*', 'tiny_vit_*' ] NUM_NON_STD = len(NON_STD_FILTERS) # exclude models that cause specific test failures if 'GITHUB_ACTIONS' in os.environ: # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models EXCLUDE_FILTERS = [ '*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm', '*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*', '*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*', '*huge*', '*giant*', '*gigantic*', '*enormous*', 'maxvit_xlarge*', 'regnet*1280', 'regnet*2560'] NON_STD_EXCLUDE_FILTERS = ['*huge*', '*giant*', '*gigantic*', '*enormous*'] else: EXCLUDE_FILTERS = ['*enormous*'] NON_STD_EXCLUDE_FILTERS = ['*gigantic*', '*enormous*'] EXCLUDE_JIT_FILTERS = [] TARGET_FWD_SIZE = MAX_FWD_SIZE = 384 TARGET_BWD_SIZE = 128 MAX_BWD_SIZE = 320 MAX_FWD_OUT_SIZE = 448 TARGET_JIT_SIZE = 128 MAX_JIT_SIZE = 320 TARGET_FFEAT_SIZE = 96 MAX_FFEAT_SIZE = 256 TARGET_FWD_FX_SIZE = 128 MAX_FWD_FX_SIZE = 256 TARGET_BWD_FX_SIZE = 128 MAX_BWD_FX_SIZE = 224 def _get_input_size(model=None, model_name='', target=None): if model is None: assert model_name, "One of model or model_name must be provided" input_size = get_pretrained_cfg_value(model_name, 'input_size') fixed_input_size = get_pretrained_cfg_value(model_name, 'fixed_input_size') min_input_size = get_pretrained_cfg_value(model_name, 'min_input_size') else: default_cfg = 
model.default_cfg input_size = default_cfg['input_size'] fixed_input_size = default_cfg.get('fixed_input_size', None) min_input_size = default_cfg.get('min_input_size', None) assert input_size is not None if fixed_input_size: return input_size if min_input_size: if target and max(input_size) > target: input_size = min_input_size else: if target and max(input_size) > target: input_size = tuple([min(x, target) for x in input_size]) return input_size @pytest.mark.base @pytest.mark.timeout(timeout120) @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS)) @pytest.mark.parametrize('batch_size', [1]) def test_model_forward(model_name, batch_size): """Run a single forward pass with each model""" model = create_model(model_name, pretrained=False) model.eval() input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE) if max(input_size) > MAX_FWD_SIZE: pytest.skip("Fixed input size model > limit.") inputs = torch.randn((batch_size, *input_size)) inputs = inputs.to(torch_device) model.to(torch_device) outputs = model(inputs) assert outputs.shape[0] == batch_size assert not torch.isnan(outputs).any(), 'Output included NaNs' @pytest.mark.base @pytest.mark.timeout(timeout120) @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True)) @pytest.mark.parametrize('batch_size', [2]) def test_model_backward(model_name, batch_size): """Run a single forward pass with each model""" input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE) if max(input_size) > MAX_BWD_SIZE: pytest.skip("Fixed input size model > limit.") model = create_model(model_name, pretrained=False, num_classes=42) num_params = sum([x.numel() for x in model.parameters()]) model.train() inputs = torch.randn((batch_size, *input_size)) inputs = inputs.to(torch_device) model.to(torch_device) outputs = model(inputs) if isinstance(outputs, tuple): outputs = torch.cat(outputs) outputs.mean().backward() for n, x in model.named_parameters(): assert x.grad is not None, f'No gradient for {n}' num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) assert outputs.shape[-1] == 42 assert num_params == num_grad, 'Some parameters are missing gradients' assert not torch.isnan(outputs).any(), 'Output included NaNs' @pytest.mark.cfg @pytest.mark.timeout(timeout300) @pytest.mark.parametrize('model_name', list_models( exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_default_cfgs(model_name, batch_size): """Run a single forward pass with each model""" model = create_model(model_name, pretrained=False) model.eval() model.to(torch_device) state_dict = model.state_dict() cfg = model.default_cfg pool_size = cfg['pool_size'] input_size = model.default_cfg['input_size'] output_fmt = getattr(model, 'output_fmt', 'NCHW') spatial_axis = get_spatial_dim(output_fmt) assert len(spatial_axis) == 2 # TODO add 1D sequence support feat_axis = get_channel_dim(output_fmt) if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \ not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]): # output sizes only checked if default res <= 448 * 448 to keep resource down input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size]) input_tensor = torch.randn((batch_size, *input_size), device=torch_device) # test forward_features (always unpooled) outputs = model.forward_features(input_tensor) assert outputs.shape[spatial_axis[0]] == pool_size[0], 'unpooled feature 
shape != config' assert outputs.shape[spatial_axis[1]] == pool_size[1], 'unpooled feature shape != config' if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)): assert outputs.shape[feat_axis] == model.num_features # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features model.reset_classifier(0) model.to(torch_device) outputs = model.forward(input_tensor) assert len(outputs.shape) == 2 assert outputs.shape[1] == model.num_features # test model forward without pooling and classifier model.reset_classifier(0, '') # reset classifier and set global pooling to pass-through model.to(torch_device) outputs = model.forward(input_tensor) assert len(outputs.shape) == 4 if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)): # mobilenetv3/ghostnet/repghostnet/vgg forward_features vs removed pooling differ due to location or lack of GAP assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1] if 'pruned' not in model_name: # FIXME better pruned model handling # test classifier + global pool deletion via __init__ model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval() model.to(torch_device) outputs = model.forward(input_tensor) assert len(outputs.shape) == 4 if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)): assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1] # check classifier name matches default_cfg if cfg.get('num_classes', None): classifier = cfg['classifier'] if not isinstance(classifier, (tuple, list)): classifier = classifier, for c in classifier: assert c + ".weight" in state_dict.keys(), f'{c} not in model params' # check first conv(s) names match default_cfg first_conv = cfg['first_conv'] if isinstance(first_conv, str): first_conv = (first_conv,) assert isinstance(first_conv, (tuple, list)) for fc in first_conv: assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' @pytest.mark.cfg @pytest.mark.timeout(timeout300) @pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_default_cfgs_non_std(model_name, batch_size): """Run a single forward pass with each model""" model = create_model(model_name, pretrained=False) model.eval() model.to(torch_device) state_dict = model.state_dict() cfg = model.default_cfg input_size = _get_input_size(model=model) if max(input_size) > 320: # FIXME const pytest.skip("Fixed input size model > limit.") input_tensor = torch.randn((batch_size, *input_size), device=torch_device) feat_dim = getattr(model, 'feature_dim', None) outputs = model.forward_features(input_tensor) if isinstance(outputs, (tuple, list)): # cannot currently verify multi-tensor output. 
pass else: if feat_dim is None: feat_dim = -1 if outputs.ndim == 3 else 1 assert outputs.shape[feat_dim] == model.num_features # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features model.reset_classifier(0) model.to(torch_device) outputs = model.forward(input_tensor) if isinstance(outputs, (tuple, list)): outputs = outputs[0] if feat_dim is None: feat_dim = -1 if outputs.ndim == 3 else 1 assert outputs.shape[feat_dim] == model.num_features, 'pooled num_features != config' model = create_model(model_name, pretrained=False, num_classes=0).eval() model.to(torch_device) outputs = model.forward(input_tensor) if isinstance(outputs, (tuple, list)): outputs = outputs[0] if feat_dim is None: feat_dim = -1 if outputs.ndim == 3 else 1 assert outputs.shape[feat_dim] == model.num_features # check classifier name matches default_cfg if cfg.get('num_classes', None): classifier = cfg['classifier'] if not isinstance(classifier, (tuple, list)): classifier = classifier, for c in classifier: assert c + ".weight" in state_dict.keys(), f'{c} not in model params' # check first conv(s) names match default_cfg first_conv = cfg['first_conv'] if isinstance(first_conv, str): first_conv = (first_conv,) assert isinstance(first_conv, (tuple, list)) for fc in first_conv: assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' if 'GITHUB_ACTIONS' not in os.environ: @pytest.mark.timeout(240) @pytest.mark.parametrize('model_name', list_models(pretrained=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_load_pretrained(model_name, batch_size): """Create that pretrained weights load, verify support for in_chans != 3 while doing so.""" in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5) create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0) @pytest.mark.timeout(240) @pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS)) @pytest.mark.parametrize('batch_size', [1]) def test_model_features_pretrained(model_name, batch_size): """Create that pretrained weights load when features_only==True.""" create_model(model_name, pretrained=True, features_only=True) @pytest.mark.torchscript @pytest.mark.timeout(timeout120) @pytest.mark.parametrize( 'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_forward_torchscript(model_name, batch_size): """Run a single forward pass with each model""" input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) if max(input_size) > MAX_JIT_SIZE: pytest.skip("Fixed input size model > limit.") with set_scriptable(True): model = create_model(model_name, pretrained=False) model.eval() model = torch.jit.script(model) model.to(torch_device) outputs = model(torch.randn((batch_size, *input_size))) assert outputs.shape[0] == batch_size assert not torch.isnan(outputs).any(), 'Output included NaNs' EXCLUDE_FEAT_FILTERS = [ '*pruned*', # hopefully fix at some point ] + NON_STD_FILTERS if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system(): # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d'] @pytest.mark.features @pytest.mark.timeout(120) @pytest.mark.parametrize('model_name', 
list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS, include_tags=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_forward_features(model_name, batch_size): """Run a single forward pass with each model in feature extraction mode""" model = create_model(model_name, pretrained=False, features_only=True) model.eval() expected_channels = model.feature_info.channels() expected_reduction = model.feature_info.reduction() assert len(expected_channels) >= 4 # all models here should have at least 4 feature levels by default, some 5 or 6 input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE) if max(input_size) > MAX_FFEAT_SIZE: pytest.skip("Fixed input size model > limit.") output_fmt = getattr(model, 'output_fmt', 'NCHW') feat_axis = get_channel_dim(output_fmt) spatial_axis = get_spatial_dim(output_fmt) import math outputs = model(torch.randn((batch_size, *input_size))) assert len(expected_channels) == len(outputs) spatial_size = input_size[-2:] for e, r, o in zip(expected_channels, expected_reduction, outputs): assert e == o.shape[feat_axis] assert o.shape[spatial_axis[0]] <= math.ceil(spatial_size[0] / r) + 1 assert o.shape[spatial_axis[1]] <= math.ceil(spatial_size[1] / r) + 1 assert o.shape[0] == batch_size assert not torch.isnan(o).any() def _create_fx_model(model, train=False): # This block of code does a bit of juggling to handle any case where there are multiple outputs in train mode # So we trace once and look at the graph, and get the indices of the nodes that lead into the original fx output # node. Then we use those indices to select from train_nodes returned by torchvision get_graph_node_names tracer_kwargs = dict( leaf_modules=get_notrace_modules(), autowrap_functions=get_notrace_functions(), #enable_cpatching=True, param_shapes_constant=True ) train_nodes, eval_nodes = get_graph_node_names(model, tracer_kwargs=tracer_kwargs) eval_return_nodes = [eval_nodes[-1]] train_return_nodes = [train_nodes[-1]] if train: tracer = NodePathTracer(**tracer_kwargs) graph = tracer.trace(model) graph_nodes = list(reversed(graph.nodes)) output_node_names = [n.name for n in graph_nodes[0]._input_nodes.keys()] graph_node_names = [n.name for n in graph_nodes] output_node_indices = [-graph_node_names.index(node_name) for node_name in output_node_names] train_return_nodes = [train_nodes[ix] for ix in output_node_indices] fx_model = create_feature_extractor( model, train_return_nodes=train_return_nodes, eval_return_nodes=eval_return_nodes, tracer_kwargs=tracer_kwargs, ) return fx_model EXCLUDE_FX_FILTERS = ['vit_gi*'] # not enough memory to run fx on more models than other tests if 'GITHUB_ACTIONS' in os.environ: EXCLUDE_FX_FILTERS += [ 'beit_large*', 'mixer_l*', '*nfnet_f2*', '*resnext101_32x32d', 'resnetv2_152x2*', 'resmlp_big*', 'resnetrs270', 'swin_large*', 'vgg*', 'vit_large*', 'vit_base_patch8*', 'xcit_large*', ] @pytest.mark.fxforward @pytest.mark.timeout(120) @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS)) @pytest.mark.parametrize('batch_size', [1]) def test_model_forward_fx(model_name, batch_size): """ Symbolically trace each model and run single forward pass through the resulting GraphModule Also check that the output of a forward pass through the GraphModule is the same as that from the original Module """ if not has_fx_feature_extraction: pytest.skip("Can't test FX. 
Torch >= 1.10 and Torchvision >= 0.11 are required.") model = create_model(model_name, pretrained=False) model.eval() input_size = _get_input_size(model=model, target=TARGET_FWD_FX_SIZE) if max(input_size) > MAX_FWD_FX_SIZE: pytest.skip("Fixed input size model > limit.") with torch.no_grad(): inputs = torch.randn((batch_size, *input_size)) outputs = model(inputs) if isinstance(outputs, tuple): outputs = torch.cat(outputs) model = _create_fx_model(model) fx_outputs = tuple(model(inputs).values()) if isinstance(fx_outputs, tuple): fx_outputs = torch.cat(fx_outputs) assert torch.all(fx_outputs == outputs) assert outputs.shape[0] == batch_size assert not torch.isnan(outputs).any(), 'Output included NaNs' @pytest.mark.fxbackward @pytest.mark.timeout(120) @pytest.mark.parametrize('model_name', list_models( exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True)) @pytest.mark.parametrize('batch_size', [2]) def test_model_backward_fx(model_name, batch_size): """Symbolically trace each model and run single backward pass through the resulting GraphModule""" if not has_fx_feature_extraction: pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.") input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE) if max(input_size) > MAX_BWD_FX_SIZE: pytest.skip("Fixed input size model > limit.") model = create_model(model_name, pretrained=False, num_classes=42) model.train() num_params = sum([x.numel() for x in model.parameters()]) if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6: pytest.skip("Skipping FX backward test on model with more than 100M params.") model = _create_fx_model(model, train=True) outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) if isinstance(outputs, tuple): outputs = torch.cat(outputs) outputs.mean().backward() for n, x in model.named_parameters(): assert x.grad is not None, f'No gradient for {n}' num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) assert outputs.shape[-1] == 42 assert num_params == num_grad, 'Some parameters are missing gradients' assert not torch.isnan(outputs).any(), 'Output included NaNs' if 'GITHUB_ACTIONS' not in os.environ: # FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process # reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow EXCLUDE_FX_JIT_FILTERS = [ 'deit_*_distilled_patch16_224', 'levit*', 'pit_*_distilled_224', ] + EXCLUDE_FX_FILTERS @pytest.mark.timeout(120) @pytest.mark.parametrize( 'model_name', list_models( exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True)) @pytest.mark.parametrize('batch_size', [1]) def test_model_forward_fx_torchscript(model_name, batch_size): """Symbolically trace each model, script it, and run single forward pass""" if not has_fx_feature_extraction: pytest.skip("Can't test FX. 
Torch >= 1.10 and Torchvision >= 0.11 are required.") input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) if max(input_size) > MAX_JIT_SIZE: pytest.skip("Fixed input size model > limit.") with set_scriptable(True): model = create_model(model_name, pretrained=False) model.eval() model = torch.jit.script(_create_fx_model(model)) with torch.no_grad(): outputs = tuple(model(torch.randn((batch_size, *input_size))).values()) if isinstance(outputs, tuple): outputs = torch.cat(outputs) assert outputs.shape[0] == batch_size assert not torch.isnan(outputs).any(), 'Output included NaNs'
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/tests/test_optim.py
""" Optimzier Tests These tests were adapted from PyTorch' optimizer tests. """ import math import pytest import functools from copy import deepcopy import torch from torch.testing._internal.common_utils import TestCase from torch.nn import Parameter from timm.scheduler import PlateauLRScheduler from timm.optim import create_optimizer_v2 import importlib import os torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cuda') # HACK relying on internal PyTorch test functionality for comparisons that I don't want to write torch_tc = TestCase() def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) optimizer = constructor(weight, bias) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) # to check if the optimizer can be printed as a string optimizer.__repr__() def fn(): optimizer.zero_grad() y = weight.mv(input) if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): y = y.cuda(bias.get_device()) loss = (y + bias).pow(2).sum() loss.backward() return loss initial_value = fn().item() for _i in range(200): for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): val_loss = fn() scheduler.step(val_loss) else: scheduler.step() optimizer.step(fn) assert fn().item() < initial_value def _test_state_dict(weight, bias, input, constructor): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) def fn_base(optimizer, weight, bias): optimizer.zero_grad() i = input_device if weight.device.type != 'cpu' else input loss = (weight.mv(i) + bias).pow(2).sum() loss.backward() return loss optimizer = constructor(weight, bias) fn = functools.partial(fn_base, optimizer, weight, bias) # Prime the optimizer for _i in range(20): optimizer.step(fn) # Clone the weights and construct new optimizer for them with torch.no_grad(): weight_c = Parameter(weight.clone().detach()) bias_c = Parameter(bias.clone().detach()) optimizer_c = constructor(weight_c, bias_c) fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) # Load state dict state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_c.load_state_dict(state_dict_c) # Run both optimizations in parallel for _i in range(20): optimizer.step(fn) optimizer_c.step(fn_c) torch_tc.assertEqual(weight, weight_c) torch_tc.assertEqual(bias, bias_c) # Make sure state dict is deterministic with equal but not identical parameters torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) # Make sure repeated parameters have identical representation in state dict optimizer_c.param_groups.extend(optimizer_c.param_groups) torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) # Check that state dict can be loaded even when we cast parameters # to a different type and move to a different device. 
if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return with torch.no_grad(): input_device = Parameter(input.clone().detach().float().to(torch_device)) weight_device = Parameter(weight.clone().detach().to(torch_device)) bias_device = Parameter(bias.clone().detach().to(torch_device)) optimizer_device = constructor(weight_device, bias_device) fn_device = functools.partial(fn_base, optimizer_device, weight_device, bias_device) state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_device.load_state_dict(state_dict_c) # Make sure state dict wasn't modified torch_tc.assertEqual(state_dict, state_dict_c) for _i in range(20): optimizer.step(fn) optimizer_device.step(fn_device) torch_tc.assertEqual(weight, weight_device) torch_tc.assertEqual(bias, bias_device) # validate deepcopy() copies all public attributes def getPublicAttr(obj): return set(k for k in obj.__dict__ if not k.startswith('_')) assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) def _test_basic_cases(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] _test_state_dict( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor ) _test_basic_cases_template( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor, scheduler_constructors ) # non-contiguous parameters _test_basic_cases_template( torch.randn(10, 5, 2)[..., 0], torch.randn(10, 2)[..., 0], torch.randn(5), constructor, scheduler_constructors ) # CUDA if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return _test_basic_cases_template( torch.randn(10, 5).to(torch_device), torch.randn(10).to(torch_device), torch.randn(5).to(torch_device), constructor, scheduler_constructors ) def _test_model(optimizer, params, device=torch.device('cpu')): weight = torch.tensor( [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], device=device, requires_grad=True) bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) model = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Sigmoid(), torch.nn.Linear(3, 1), torch.nn.Sigmoid()) model.to(device) pretrained_dict = model.state_dict() pretrained_dict['0.weight'] = weight pretrained_dict['0.bias'] = bias pretrained_dict['2.weight'] = weight2 pretrained_dict['2.bias'] = bias2 model.load_state_dict(pretrained_dict) optimizer = create_optimizer_v2(model, opt=optimizer, **params) prev_loss = float('inf') for i in range(20): optimizer.zero_grad() output = model(input) loss = output.sum() loss.backward() loss = loss.item() assert loss < prev_loss prev_loss = loss optimizer.step() def rosenbrock(tensor): x, y = tensor return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 def drosenbrock(tensor): x, y = tensor return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) def _test_rosenbrock(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] params_t = torch.tensor([1.5, 1.5]) params = Parameter(params_t) optimizer = constructor([params]) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) solution = torch.tensor([1, 1]) 
initial_dist = params.clone().detach().dist(solution) def eval(params, w): # Depending on w, provide only the x or y gradient optimizer.zero_grad() loss = rosenbrock(params) loss.backward() grad = drosenbrock(params.clone().detach()) # NB: We torture test the optimizer by returning an # uncoalesced sparse tensor if w: i = torch.LongTensor([[0, 0]]) x = grad[0] v = torch.tensor([x / 4., x - x / 4.]) else: i = torch.LongTensor([[1, 1]]) y = grad[1] v = torch.tensor([y - y / 4., y / 4.]) x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype) with torch.no_grad(): params.grad = x.to_dense() return loss for i in range(2000): # Do cyclic coordinate descent w = i % 2 optimizer.step(functools.partial(eval, params, w)) for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): scheduler.step(rosenbrock(params)) else: scheduler.step() torch_tc.assertLessEqual(params.clone().detach().dist(solution), initial_dist) def _build_params_dict(weight, bias, **kwargs): return [{'params': [weight]}, dict(params=[bias], **kwargs)] def _build_params_dict_single(weight, bias, **kwargs): return [dict(params=bias, **kwargs)] #@pytest.mark.parametrize('optimizer', ['sgd', 'momentum']) # FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts @pytest.mark.parametrize('optimizer', ['sgd']) def test_sgd(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=1e-2), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-2), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-2), optimizer) ) # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10)] # ) # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: ReduceLROnPlateau(opt)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.99, step_size=10), # lambda opt: ExponentialLR(opt, gamma=0.99), # lambda opt: ReduceLROnPlateau(opt)] # ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax']) def test_adam(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) 
_test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['adabelief']) def test_adabelief(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['radam', 'radabelief']) def test_rectified(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad']) def test_adaother(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-1) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['adafactor']) def test_adafactor(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['lamb', 'lambc']) def test_lamb(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( 
lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=1e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc']) def test_lars(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=1e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=1e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw']) def test_madgrad(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['novograd']) def test_novograd(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf']) def test_rmsprop(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['adamp']) def test_adamp(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, 
lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['sgdp']) def test_sgdp(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum']) def test_lookahead_sgd(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) @pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam']) def test_lookahead_adam(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) @pytest.mark.parametrize('optimizer', ['lookahead_radam']) def test_lookahead_radam(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer, lr=1e-3) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=3e-3), optimizer) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-4) )
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/tests/test_layers.py
import torch
import torch.nn as nn

from timm.layers import create_act_layer, set_layer_config

import importlib
import os

torch_backend = os.environ.get('TORCH_BACKEND')
if torch_backend is not None:
    importlib.import_module(torch_backend)
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')


class MLP(nn.Module):
    def __init__(self, act_layer="relu", inplace=True):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(1000, 100)
        self.act = create_act_layer(act_layer, inplace=inplace)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x


def _run_act_layer_grad(act_type, inplace=True):
    x = torch.rand(10, 1000) * 10
    m = MLP(act_layer=act_type, inplace=inplace)

    def _run(x, act_layer=''):
        if act_layer:
            # replace act layer if set
            m.act = create_act_layer(act_layer, inplace=inplace)
        out = m(x)
        l = (out - 0).pow(2).sum()
        return l

    x = x.to(device=torch_device)
    m.to(device=torch_device)

    out_me = _run(x)

    with set_layer_config(scriptable=True):
        out_jit = _run(x, act_type)

    assert torch.isclose(out_jit, out_me)

    with set_layer_config(no_jit=True):
        out_basic = _run(x, act_type)

    assert torch.isclose(out_basic, out_jit)


def test_swish_grad():
    for _ in range(100):
        _run_act_layer_grad('swish')


def test_mish_grad():
    for _ in range(100):
        _run_act_layer_grad('mish')


def test_hard_sigmoid_grad():
    for _ in range(100):
        _run_act_layer_grad('hard_sigmoid', inplace=None)


def test_hard_swish_grad():
    for _ in range(100):
        _run_act_layer_grad('hard_swish')


def test_hard_mish_grad():
    for _ in range(100):
        _run_act_layer_grad('hard_mish')
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/tests/test_utils.py
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d

import timm
from timm.utils.model import freeze, unfreeze


def test_freeze_unfreeze():
    model = timm.create_model('resnet18')

    # Freeze all
    freeze(model)
    # Check top level module
    assert model.fc.weight.requires_grad == False
    # Check submodule
    assert model.layer1[0].conv1.weight.requires_grad == False
    # Check BN
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)

    # Unfreeze all
    unfreeze(model)
    # Check top level module
    assert model.fc.weight.requires_grad == True
    # Check submodule
    assert model.layer1[0].conv1.weight.requires_grad == True
    # Check BN
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)

    # Freeze some
    freeze(model, ['layer1', 'layer2.0'])
    # Check frozen
    assert model.layer1[0].conv1.weight.requires_grad == False
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    assert model.layer2[0].conv1.weight.requires_grad == False
    # Check not frozen
    assert model.layer3[0].conv1.weight.requires_grad == True
    assert isinstance(model.layer3[0].bn1, BatchNorm2d)
    assert model.layer2[1].conv1.weight.requires_grad == True

    # Unfreeze some
    unfreeze(model, ['layer1', 'layer2.0'])
    # Check not frozen
    assert model.layer1[0].conv1.weight.requires_grad == True
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)
    assert model.layer2[0].conv1.weight.requires_grad == True

    # Freeze/unfreeze BN
    # From root
    freeze(model, ['layer1.0.bn1'])
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    unfreeze(model, ['layer1.0.bn1'])
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)
    # From direct parent
    freeze(model.layer1[0], ['bn1'])
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    unfreeze(model.layer1[0], ['bn1'])
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/docs/archived_changes.md
# Archived Changes

### Nov 22, 2021
* A number of updated weights and new model defs
  * `eca_halonext26ts` - 79.5 @ 256
  * `resnet50_gn` (new) - 80.1 @ 224, 81.3 @ 288
  * `resnet50` - 80.7 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, [weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth))
  * `resnext50_32x4d` - 81.1 @ 224, 82.0 @ 288
  * `sebotnet33ts_256` (new) - 81.2 @ 224
  * `lamhalobotnet50ts_256` - 81.5 @ 256
  * `halonet50ts` - 81.7 @ 256
  * `halo2botnet50ts_256` - 82.0 @ 256
  * `resnet101` - 82.0 @ 224, 82.8 @ 288
  * `resnetv2_101` (new) - 82.1 @ 224, 83.0 @ 288
  * `resnet152` - 82.8 @ 224, 83.5 @ 288
  * `regnetz_d8` (new) - 83.5 @ 256, 84.0 @ 320
  * `regnetz_e8` (new) - 84.5 @ 256, 85.0 @ 320
* `vit_base_patch8_224` (85.8 top-1) & `in21k` variant weights added thanks [Martins Bruveris](https://github.com/martinsbruveris)
* Groundwork in for FX feature extraction thanks to [Alexander Soare](https://github.com/alexander-soare)
  * models updated for tracing compatibility (almost full support with some distilled transformer exceptions)

### Oct 19, 2021
* ResNet strikes back (https://arxiv.org/abs/2110.00476) weights added, plus any extra training components used. Model weights and some more details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-rsb-weights)
* BCE loss and Repeated Augmentation support for RSB paper
* 4 series of ResNet based attention model experiments being added (implemented across byobnet.py/byoanet.py). These include all sorts of attention, from channel attn like SE, ECA to 2D QKV self-attention layers such as Halo, Bottleneck, Lambda. Details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* Working implementations of the following 2D self-attention modules (likely to be differences from paper or eventual official impl):
  * Halo (https://arxiv.org/abs/2103.12731)
  * Bottleneck Transformer (https://arxiv.org/abs/2101.11605)
  * LambdaNetworks (https://arxiv.org/abs/2102.08602)
* A RegNetZ series of models with some attention experiments (being added to). These do not follow the paper (https://arxiv.org/abs/2103.06877) in any way other than block architecture, details of official models are not available. See more here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* ConvMixer (https://openreview.net/forum?id=TVHS5Y4dNvM), CrossVit (https://arxiv.org/abs/2103.14899), and BeiT (https://arxiv.org/abs/2106.08254) architectures + weights added
* freeze/unfreeze helpers by [Alexander Soare](https://github.com/alexander-soare)

### Aug 18, 2021
* Optimizer bonanza!
  * Add LAMB and LARS optimizers, incl trust ratio clipping options. Tweaked to work properly in PyTorch XLA (tested on TPUs w/ `timm bits` [branch](https://github.com/rwightman/pytorch-image-models/tree/bits_and_tpu/timm/bits))
  * Add MADGRAD from FB research w/ a few tweaks (decoupled decay option, step handling that works with PyTorch XLA)
  * Some cleanup on all optimizers and factory. No more `.data`, a bit more consistency, unit tests for all!
  * SGDP and AdamP still won't work with PyTorch XLA but others should (have yet to test Adabelief, Adafactor, Adahessian myself).
* EfficientNet-V2 XL TF ported weights added, but they don't validate well in PyTorch (L is better). The pre-processing for the V2 TF training is a bit different and the fine-tuned 21k -> 1k weights are very sensitive and less robust than the 1k weights.
* Added PyTorch trained EfficientNet-V2 'Tiny' w/ GlobalContext attn weights. Only .1-.2 top-1 better than the SE so more of a curiosity for those interested.

### July 12, 2021
* Add XCiT models from [official facebook impl](https://github.com/facebookresearch/xcit). Contributed by [Alexander Soare](https://github.com/alexander-soare)

### July 5-9, 2021
* Add `efficientnetv2_rw_t` weights, a custom 'tiny' 13.6M param variant that is a bit better than (non NoisyStudent) B3 models. Both faster and better accuracy (at same or lower res)
  * top-1 82.34 @ 288x288 and 82.54 @ 320x320
* Add [SAM pretrained](https://arxiv.org/abs/2106.01548) in1k weight for ViT B/16 (`vit_base_patch16_sam_224`) and B/32 (`vit_base_patch32_sam_224`) models.
* Add 'Aggregating Nested Transformer' (NesT) w/ weights converted from official [Flax impl](https://github.com/google-research/nested-transformer). Contributed by [Alexander Soare](https://github.com/alexander-soare).
  * `jx_nest_base` - 83.534, `jx_nest_small` - 83.120, `jx_nest_tiny` - 81.426

### June 23, 2021
* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050). Hparams for this and other recent MLP training [here](https://gist.github.com/rwightman/d6c264a9001f9167e06c209f630b2cc6)

### June 20, 2021
* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
  * .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg)
  * See [example notebook](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb) from [official impl](https://github.com/google-research/vision_transformer/) for navigating the augreg weights
  * Replaced all default weights w/ best AugReg variant (if possible). All AugReg 21k classifiers work.
    * Highlights: `vit_large_patch16_384` (87.1 top-1), `vit_large_r50_s32_384` (86.2 top-1), `vit_base_patch16_384` (86.0 top-1)
  * `vit_deit_*` renamed to just `deit_*`
  * Remove my old small model, replace with DeiT compatible small w/ AugReg weights
* Add 1st training of my `gmixer_24_224` MLP /w GLU, 78.1 top-1 w/ 25M params.
* Add weights from official ResMLP release (https://github.com/facebookresearch/deit)
* Add `eca_nfnet_l2` weights from my 'lightweight' series. 84.7 top-1 at 384x384.
* Add distilled BiT 50x1 student and 152x2 Teacher weights from [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237)
* NFNets and ResNetV2-BiT models work w/ Pytorch XLA now
  * weight standardization uses F.batch_norm instead of std_mean (std_mean wasn't lowered)
  * eps values adjusted, will be slight differences but should be quite close
* Improve test coverage and classifier interface of non-conv (vision transformer and mlp) models
* Cleanup a few classifier / flatten details for models w/ conv classifiers or early global pool
* Please report any regressions, this PR touched quite a few models.

### June 8, 2021
* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1.
* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1.
  * NFNet inspired block layout with quad layer stem and no maxpool
  * Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288

### May 25, 2021
* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models
* Cleanup input_size/img_size override handling and testing for all vision transformer models
* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params.

### May 14, 2021
* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
  * 1k trained variants: `tf_efficientnetv2_s/m/l`
  * 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
  * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
  * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
  * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
  * Some blank `efficientnetv2_*` models in-place for future native PyTorch training

### May 5, 2021
* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora)
* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin)
* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23)
* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai)
* Update ByoaNet attention models
  * Improve SA module inits
  * Hack together experimental stand-alone Swin based attn module and `swinnet`
  * Consistent '26t' model defs for experiments.
* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1.
* WandB logging support

### April 13, 2021
* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer

### April 12, 2021
* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256.
* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs
  * Lambda Networks - https://arxiv.org/abs/2102.08602
  * Bottleneck Transformers - https://arxiv.org/abs/2101.11605
  * Halo Nets - https://arxiv.org/abs/2103.12731
* Adabelief optimizer contributed by Juntang Zhuang

### April 1, 2021
* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference
* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit)
  * Merged distilled variant into main for torchscript compatibility
  * Some `timm` cleanup/style tweaks and weights have hub download support
* Cleanup Vision Transformer (ViT) models
  * Merge distilled (DeiT) model into main so that torchscript can work
  * Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch)
  * Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids
  * Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (ViT) with distilled variants
  * nn.Sequential for block stack (does not break downstream compat)
* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT)
* Add RegNetY-160 weights from DeiT teacher model
* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b` -> `nfnet_l0`) weights 82.75 top-1 @ 288x288
* Some fixes/improvements for TFDS dataset wrapper

### March 7, 2021
* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc).
* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation.

### Feb 18, 2021
* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets).
  * Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn.
  * These models are big, expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants.
  * Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated).
  * Matching the original pre-processing as closely as possible I get these results:
    * `dm_nfnet_f6` - 86.352
    * `dm_nfnet_f5` - 86.100
    * `dm_nfnet_f4` - 85.834
    * `dm_nfnet_f3` - 85.676
    * `dm_nfnet_f2` - 85.178
    * `dm_nfnet_f1` - 84.696
    * `dm_nfnet_f0` - 83.464

### Feb 16, 2021
* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py. A minimal usage sketch follows below.
  * AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc`
  * PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0`
  * PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value`
  * AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet.
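
For reference, a minimal sketch of applying AGC in a custom training loop (outside `train.py`), using the `adaptive_clip_grad` helper from `timm.utils`; the model, optimizer, learning rate, and data below are illustrative placeholders only:

```python
import torch
import timm
from timm.utils import adaptive_clip_grad

# illustrative model/optimizer choices, not a recommendation
model = timm.create_model('nf_resnet50', num_classes=10)
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-4)

inputs = torch.randn(2, 3, 224, 224)  # stand-in batch
loss = model(inputs).mean()           # stand-in loss
loss.backward()

# scale each parameter's gradient so its norm stays within clip_factor * weight norm,
# matching the spirit of `--clip-grad .01 --clip-mode agc` in train.py
adaptive_clip_grad(model.parameters(), clip_factor=0.01)
optimizer.step()
optimizer.zero_grad()
```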
### Feb 12, 2021
* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs

### Feb 10, 2021
* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks')
  * GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py`
  * RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py`
  * classic VGG (from torchvision, impl in `vgg`)
* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models
* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not.
* Fix a few bugs introduced since last pypi release

### Feb 8, 2021
* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d trained @ 256, fine-tuned @ 320, test @ 352.
  * `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256
  * `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256
  * `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320
* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs; all `tn` defs were renamed to `t` and the old `t` defs removed (`seresnext26t_32x4d` was the only removed model w/ weights)
* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test

### Jan 30, 2021
* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)

### Jan 25, 2021
* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer
* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer
* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support
  * NOTE: ImageNet-21k classifier heads were zeroed in original weights, they are only useful for transfer learning
* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit
* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes
* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script
  * Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2`
* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar
  * Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp`
* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling

### Jan 3, 2021
* Add SE-ResNet-152D weights
  * 256x256 val, 0.94 crop top-1 - 83.75
  * 320x320 val, 1.0 crop - 84.36
* Update results files

### Dec 18, 2020
* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256
  * 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25)
  * 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76)
  * 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01)

### Dec 7, 2020
* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models
* Misc fixes for SiLU ONNX export, default_cfg missing from Feature
extraction models, Linear layer w/ AMP + torchscript
* PyPi release @ 0.3.2 (needed by EfficientDet)

### Oct 30, 2020
* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue.
* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16.
* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated.
* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage.
* PyPi release @ 0.3.0 version!

### Oct 26, 2020
* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer
* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl
  * ViT-B/16 - 84.2
  * ViT-B/32 - 81.7
  * ViT-L/16 - 85.2
  * ViT-L/32 - 81.5

### Oct 21, 2020
* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs.

### Oct 13, 2020
* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train...
* Adafactor and AdaHessian (FP32 only, no AMP) optimizers
* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1
* Pip release, doc updates pending a few more changes...

### Sept 18, 2020
* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D
* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D)

### Sept 3, 2020
* New weights
  * Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision)
  * SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene)
* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`)
* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel.

### Aug 12, 2020
* New/updated weights from training experiments
  * EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp)
  * RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver)
  * CSPResNet50 - 79.6 top-1 (76.6 from official ver)
* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples
* Some fixes for using pretrained weights with `in_chans` != 3 on several models.

### Aug 5, 2020
Universal feature extraction, new models, new weights, new test sets.
* All models support the `features_only=True` argument for `create_model` call to return a network that extracts feature maps from the deepest layer at each stride (see the sketch below).
* New models
  * CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet
  * ReXNet
  * (Modified Aligned) Xception41/65/71 (a proper port of TF models)
* New trained weights
  * SEResNet50 - 80.3 top-1
  * CSPDarkNet53 - 80.1 top-1
  * CSPResNeXt50 - 80.0 top-1
  * DPN68b - 79.2 top-1
  * EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314))
* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md)
* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh)
* Train script and loader/transform tweaks to punch through more aug arguments
* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/
* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell)
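A minimal sketch of the `features_only` usage mentioned above (the model name and input size are just examples):

```python
import torch
import timm

# features_only returns a backbone that outputs a list of feature maps,
# one per stride (e.g. strides 2, 4, 8, 16, 32 for most CNNs)
m = timm.create_model('resnet50', pretrained=True, features_only=True)
feats = m(torch.randn(1, 3, 224, 224))
for f, c in zip(feats, m.feature_info.channels()):
    print(f.shape, c)  # spatial map and its channel count at each stride
```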
### June 11, 2020
Bunch of changes:
* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions
* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1
* Activation factory added along with new activations:
  * select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export)
  * hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish
  * context mgr for setting exportable/scriptable/no_jit states
* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface
* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this
* Some import cleanup and classifier reset changes, all models will have classifier reset to nn.Identity on reset_classifier(0) call
* Prep for 0.1.28 pip release

### May 12, 2020
* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955)

### May 3, 2020
* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo)

### May 1, 2020
* Merged a number of excellent contributions in the ResNet model family over the past month
  * BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme), I trained resnetblur50 to 79.3.
  * TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23)
  * ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo)
* 200 pretrained models in total now with updated results csv in results folder

### April 5, 2020
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
  * 3.5M param MobileNet-V2 100 @ 73%
  * 4.5M param MobileNet-V2 110d @ 75%
  * 6.1M param MobileNet-V2 140 @ 76.5%
  * 5.8M param MobileNet-V2 120d @ 77.3%

### March 18, 2020
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
### Feb 29, 2020
* New MobileNet-V3 Large weights trained from scratch with this code to 75.77% top-1
* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models
  * overall results similar to a bit better training from scratch on a few smaller models tried
  * performance early in training seems consistently improved but less difference by end
  * set `fix_group_fanout=False` in `_init_weight_goog` fn if you need to reproduce past behaviour
* Experimental LR noise feature added, applies a random perturbation to the LR each epoch within a specified range of training

### Feb 18, 2020
* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268):
  * Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion
  * ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks
  * Add Selective Kernel Nets on top of ResNet base, pretrained weights
    * skresnet18 - 73% top-1
    * skresnet34 - 76.9% top-1
    * skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1
  * ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme)
  * CBAM attention experiment (not the best results so far, may remove)
  * Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets
  * Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants
* Full dataset results updated, incl NoisyStudent weights and 2 of the 3 SK weights

### Feb 12, 2020
* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)

### Feb 6, 2020
* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)

### Feb 1/2, 2020
* Port new EfficientNet-B8 (RandAugment) weights, these are different than the B8 AdvProp, different input normalization.
* Update results csv files on all models for ImageNet validation and three other test sets
* Push PyPi package update

### Jan 31, 2020
* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below.

### Jan 11/12, 2020
* Master may be a bit unstable wrt training, these changes have been tested but not all combos
* Implementations of AugMix added to existing RA and AA. Including numerous supporting pieces like JSD loss (Jensen-Shannon divergence + CE), and AugMixDataset
* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper
* ResNet-50 AugMix trained model w/ 79% top-1 added
* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd'

### Jan 3, 2020
* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section)
* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs.
### Dec 30, 2019
* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch

### Dec 28, 2019
* Add new model weights and training hparams (see Training Hparams section)
  * `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct
    * trained with RandAugment, ended up with an interesting but less than perfect result (see training section)
  * `seresnext26d_32x4d` - 77.6 top-1, 93.6 top-5
    * deep stem (32, 32, 64), avgpool downsample
    * stem/downsample from bag-of-tricks paper
  * `seresnext26t_32x4d` - 78.0 top-1, 93.7 top-5
    * deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant)
    * stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments

### Dec 23, 2019
* Add RandAugment trained MixNet-XL weights with 80.48 top-1.
* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval

### Dec 4, 2019
* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5).

### Nov 29, 2019
* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded.
  * AdvProp weights added
  * Official TF MobileNetv3 weights added
* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here...
* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification
* Consistency in global pooling, `reset_classifier`, and `forward_features` across models
  * `forward_features` always returns unpooled feature maps now
* Reasonable chance I broke something... let me know

### Nov 22, 2019
* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update (see the transform sketch below).
* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise.
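A minimal sketch of building a RandAugment training transform through `timm`'s data helpers (the magnitude string follows the `rand-m9-mstd0.5` convention used in the hparam examples later in these docs; exact factory kwargs may differ by version):

```python
from timm.data import create_transform

# RandAugment w/ magnitude 9 and magnitude noise std 0.5, applied inside
# the standard timm training transform pipeline
train_tfms = create_transform(
    input_size=224,
    is_training=True,
    auto_augment='rand-m9-mstd0.5',
)
print(train_tfms)  # torchvision-style Compose of PIL transforms
```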
hf_public_repos/pytorch-image-models/docs/results.md
# Results

CSV files containing ImageNet-1K and out-of-distribution (OOD) test set validation results for all models with pretrained weights are located in the repository [results folder](https://github.com/rwightman/pytorch-image-models/tree/master/results).

## Self-trained Weights

The table below includes ImageNet-1k validation results of model weights that I've trained myself. It is not updated as frequently as the csv results outputs linked above.

|Model | Acc@1 (Err) | Acc@5 (Err) | Param # (M) | Interpolation | Image Size |
|---|---|---|---|---|---|
| efficientnet_b3a | 82.242 (17.758) | 96.114 (3.886) | 12.23 | bicubic | 320 (1.0 crop) |
| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | bicubic | 300 |
| regnety_032 | 82.002 (17.998) | 95.906 (4.094) | 19.44 | bicubic | 224 |
| skresnext50d_32x4d | 81.278 (18.722) | 95.366 (4.634) | 27.5 | bicubic | 288 (1.0 crop) |
| seresnext50_32x4d | 81.266 (18.734) | 95.620 (4.380) | 27.6 | bicubic | 224 |
| efficientnet_b2a | 80.608 (19.392) | 95.310 (4.690) | 9.11 | bicubic | 288 (1.0 crop) |
| resnet50d | 80.530 (19.470) | 95.160 (4.840) | 25.6 | bicubic | 224 |
| mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90 | bicubic | 224 |
| efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11 | bicubic | 260 |
| seresnet50 | 80.274 (19.726) | 95.070 (4.930) | 28.1 | bicubic | 224 |
| skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5 | bicubic | 224 |
| cspdarknet53 | 80.058 (19.942) | 95.084 (4.916) | 27.6 | bicubic | 256 |
| cspresnext50 | 80.040 (19.960) | 94.944 (5.056) | 20.6 | bicubic | 224 |
| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25 | bicubic | 224 |
| resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1 | bicubic | 224 |
| cspresnet50 | 79.574 (20.426) | 94.712 (5.288) | 21.6 | bicubic | 256 |
| ese_vovnet39b | 79.320 (20.680) | 94.710 (5.290) | 24.6 | bicubic | 224 |
| resnetblur50 | 79.290 (20.710) | 94.632 (5.368) | 25.6 | bicubic | 224 |
| dpn68b | 79.216 (20.784) | 94.414 (5.586) | 12.6 | bicubic | 224 |
| resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6 | bicubic | 224 |
| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | bicubic | 224 |
| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79 | bicubic | 240 |
| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | bicubic | 224 |
| seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8 | bicubic | 224 |
| seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8 | bicubic | 224 |
| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.29 | bicubic | 224 |
| seresnext26d_32x4d | 77.602 (22.398) | 93.608 (6.392) | 16.8 | bicubic | 224 |
| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | bicubic | 224 |
| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | bicubic | 224 |
| resnet34d | 77.116 (22.884) | 93.382 (6.618) | 21.8 | bicubic | 224 |
| seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8 | bicubic | 224 |
| skresnet34 | 76.912 (23.088) | 93.322 (6.678) | 22.2 | bicubic | 224 |
| ese_vovnet19b_dw | 76.798 (23.202) | 93.268 (6.732) | 6.5 | bicubic | 224 |
| resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16 | bicubic | 224 |
| densenetblur121d | 76.576 (23.424) | 93.190 (6.810) | 8.0 | bicubic | 224 |
| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | bicubic | 224 |
| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | bicubic | 224 |
| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | bicubic | 224 |
| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | bicubic | 224 |
| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.89 | bicubic | 224 |
| resnet26 | 75.292 (24.708) | 92.57 (7.43) | 16 | bicubic | 224 |
| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | bilinear | 224 |
| resnet34 | 75.110 (24.890) | 92.284 (7.716) | 22 | bilinear | 224 |
| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | bicubic | 224 |
| seresnet34 | 74.808 (25.192) | 92.124 (7.876) | 22 | bilinear | 224 |
| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.38 | bicubic | 224 |
| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.42 | bilinear | 224 |
| skresnet18 | 73.038 (26.962) | 91.168 (8.832) | 11.9 | bicubic | 224 |
| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | bicubic | 224 |
| resnet18d | 72.260 (27.740) | 90.696 (9.304) | 11.7 | bicubic | 224 |
| seresnet18 | 71.742 (28.258) | 90.334 (9.666) | 11.8 | bicubic | 224 |

## Ported and Other Weights

For weights ported from other deep learning frameworks (Tensorflow, MXNet GluonCV) or copied from other PyTorch sources, please see the full results tables for ImageNet and various OOD test sets in the [results tables](https://github.com/rwightman/pytorch-image-models/tree/master/results).

Model code .py files contain links to original sources of models and weights.
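For example, a quick way to rank models from a local checkout of the repository (a sketch; the `results-imagenet.csv` filename and its `model`/`top1`/`top5` columns are assumptions based on the results folder layout at the time of writing):

```python
import pandas as pd

# assumes a local clone of pytorch-image-models
df = pd.read_csv('results/results-imagenet.csv')
print(df.sort_values('top1', ascending=False)[['model', 'top1', 'top5']].head())
```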
hf_public_repos/pytorch-image-models/docs/scripts.md
# Scripts

Train, validation, inference, and checkpoint cleaning scripts are included in the github root folder. Scripts are not currently packaged in the pip release.

The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on [NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples).

## Training Script

The variety of training args is large and not all combinations of options (or even individual options) have been fully tested. For the training dataset argument, specify the base folder that contains `train` and `validation` sub-folders.

To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value:

`./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4`

NOTE: It is recommended to use PyTorch 1.9+ w/ PyTorch native AMP and DDP instead of APEX AMP. `--amp` defaults to native AMP as of timm ver 0.4.3. `--apex-amp` will force use of APEX components if they are installed.

## Validation / Inference Scripts

Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing validation images, not the base as in the training script.

To validate with the model's pretrained weights (if they exist):

`python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained`

To run inference from a checkpoint:

`python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar`
hf_public_repos/pytorch-image-models/docs/index.md
# Getting Started

## Welcome

Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`.

For a more comprehensive set of docs (currently under development), please visit [timmdocs](http://timm.fast.ai) by [Aman Arora](https://github.com/amaarora).

## Install

The library can be installed with pip:

```
pip install timm
```

I update the PyPi (pip) packages when I'm confident there are no significant model regressions from previous releases. If you want to pip install the bleeding edge from GitHub, use:

```
pip install git+https://github.com/rwightman/pytorch-image-models.git
```

!!! info "Conda Environment"
    All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically 3.7, 3.8, 3.9, 3.10. Little to no care has been taken to be Python 2.x friendly and it will not be supported.

    If you run into any challenges running on Windows, or other OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read Conda) environment.

    PyTorch versions 1.9, 1.10, 1.11 have been tested with the latest versions of this code.

    I've tried to keep the dependencies minimal; the setup is as per the PyTorch default install instructions for Conda:

    ```
    conda create -n torch-env
    conda activate torch-env
    conda install pytorch torchvision cudatoolkit=11.3 -c pytorch
    conda install pyyaml
    ```

## Load a Pretrained Model

Pretrained models can be loaded using `timm.create_model`

```python
import timm

m = timm.create_model('mobilenetv3_large_100', pretrained=True)
m.eval()
```

## List Models with Pretrained Weights

```python
import timm
from pprint import pprint
model_names = timm.list_models(pretrained=True)
pprint(model_names)
>>> ['adv_inception_v3',
 'cspdarknet53',
 'cspresnext50',
 'densenet121',
 'densenet161',
 'densenet169',
 'densenet201',
 'densenetblur121d',
 'dla34',
 'dla46_c',
...
]
```

## List Model Architectures by Wildcard

```python
import timm
from pprint import pprint
model_names = timm.list_models('*resne*t*')
pprint(model_names)
>>> ['cspresnet50',
 'cspresnet50d',
 'cspresnet50w',
 'cspresnext50',
...
]
```
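## Run a Forward Pass

The created model is a standard `torch.nn.Module`, so inference is just a forward call. A minimal sketch (the 224x224 input size and 1000-class output match this model's default ImageNet-1k config):

```python
import torch
import timm

m = timm.create_model('mobilenetv3_large_100', pretrained=True)
m.eval()

# dummy batch of one 3-channel 224x224 image
with torch.no_grad():
    out = m(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000]) -- ImageNet-1k logits
```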
hf_public_repos/pytorch-image-models/docs/training_hparam_examples.md
# Training Examples

## EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5

These params are for dual Titan RTX cards with NVIDIA Apex installed:

`./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016`

## MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5

These params are for dual Titan RTX cards with NVIDIA Apex installed:

`./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce`

## SE-ResNeXt-26-D and SE-ResNeXt-26-T

These hparams (or similar) work well for a wide range of ResNet architectures. It's generally a good idea to increase the epoch # as the model size increases, ie approx 180-200 for ResNe(X)t50, and 220+ for larger. Increase batch size and LR proportionally for better GPUs or with AMP enabled. These params were for 2 1080Ti cards:

`./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112`

## EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5

The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training the process crashed. The results weren't looking amazing so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts. The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0.

## EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5

[Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for larger batch size, with the recommended B0 dropout rate of 0.2.

`./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048`

## ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5

Trained on two older 1080Ti cards, this took a while. Only a slightly better (not statistically significant) ImageNet validation result than my first good AugMix training of 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths.

`./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce`

## EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5

Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards.
Model EMA was not used, final checkpoint is the average of 8 best checkpoints during training.

`./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064`

## MobileNetV3-Large-100 - 75.766 top-1, 92.542 top-5

`./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9`

## ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5

These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPUs using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The cmd line below is tuned for 8 GPU training.

`./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce`
hf_public_repos/pytorch-image-models/docs/changes.md
# Recent Changes

### Aug 29, 2022
* MaxVit window size scales with img_size by default. Add new RelPosMlp MaxViT weight that leverages this:
  * `maxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.6 @ 320 (T)

### Aug 26, 2022
* CoAtNet (https://arxiv.org/abs/2106.04803) and MaxVit (https://arxiv.org/abs/2204.01697) `timm` original models
  * both found in [`maxxvit.py`](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/maxxvit.py) model def, contains numerous experiments outside scope of original papers
  * an unfinished Tensorflow version from MaxVit authors can be found at https://github.com/google-research/maxvit
* Initial CoAtNet and MaxVit timm pretrained weights (working on more):
  * `coatnet_nano_rw_224` - 81.7 @ 224 (T)
  * `coatnet_rmlp_nano_rw_224` - 82.0 @ 224, 82.8 @ 320 (T)
  * `coatnet_0_rw_224` - 82.4 (T) -- NOTE timm '0' coatnets have 2 more 3rd stage blocks
  * `coatnet_bn_0_rw_224` - 82.4 (T)
  * `maxvit_nano_rw_256` - 82.9 @ 256 (T)
  * `coatnet_rmlp_1_rw_224` - 83.4 @ 224, 84 @ 320 (T)
  * `coatnet_1_rw_224` - 83.6 @ 224 (G)
  * (T) = TPU trained with `bits_and_tpu` branch training code, (G) = GPU trained
* GCVit (weights adapted from https://github.com/NVlabs/GCVit, code 100% `timm` re-write for license purposes)
* MViT-V2 (multi-scale vit, adapted from https://github.com/facebookresearch/mvit)
* EfficientFormer (adapted from https://github.com/snap-research/EfficientFormer)
* PyramidVisionTransformer-V2 (adapted from https://github.com/whai362/PVT)
* 'Fast Norm' support for LayerNorm and GroupNorm that avoids float32 upcast w/ AMP (uses APEX LN if available for further boost)

### Aug 15, 2022
* ConvNeXt atto weights added
  * `convnext_atto` - 75.7 @ 224, 77.0 @ 288
  * `convnext_atto_ols` - 75.9 @ 224, 77.2 @ 288

### Aug 5, 2022
* More custom ConvNeXt smaller model defs with weights
  * `convnext_femto` - 77.5 @ 224, 78.7 @ 288
  * `convnext_femto_ols` - 77.9 @ 224, 78.9 @ 288
  * `convnext_pico` - 79.5 @ 224, 80.4 @ 288
  * `convnext_pico_ols` - 79.5 @ 224, 80.5 @ 288
  * `convnext_nano_ols` - 80.9 @ 224, 81.6 @ 288
* Updated EdgeNeXt to improve ONNX export, add new base variant and weights from original (https://github.com/mmaaz60/EdgeNeXt)

### July 28, 2022
* Add freshly minted DeiT-III Medium (width=512, depth=12, num_heads=8) model weights. Thanks [Hugo Touvron](https://github.com/TouvronHugo)!

### July 27, 2022
* All runtime benchmark and validation result csv files are finally up-to-date!
* A few more weights & model defs added:
  * `darknetaa53` - 79.8 @ 256, 80.5 @ 288
  * `convnext_nano` - 80.8 @ 224, 81.5 @ 288
  * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288
  * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288
  * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288
  * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288
  * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320
* `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program!
* Add output_stride=8 and 16 support to ConvNeXt (dilation)
* Fix deit3 models not being able to resize pos_emb
* Version 0.6.7 PyPi release (w/ above bug fixes and new weights since 0.6.5)

### July 8, 2022
More models, more fixes
* Official research models (w/ weights) added:
  * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt)
  * MobileViT-V2 from (https://github.com/apple/ml-cvnets)
  * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit)
* My own models:
  * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14)
  * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs.
  * More relative position vit fiddling. Two `srelpos` (shared relative position) models trained, and a medium w/ class token.
  * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights.
* My own model weight results (all ImageNet-1k training)
  * `resnet10t` - 66.5 @ 176, 68.3 @ 224
  * `resnet14t` - 71.3 @ 176, 72.3 @ 224
  * `resnetaa50` - 80.6 @ 224, 81.6 @ 288
  * `darknet53` - 80.0 @ 256, 80.5 @ 288
  * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288
  * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288
  * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288
  * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288
  * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320
  * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320
  * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320
  * `edgenext_small_rw` - 79.6 @ 224, 80.4 @ 320
  * `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs.
* Hugging Face Hub support fixes verified, demo notebook TBA
* Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation.
* Add support to change image extensions scanned by `timm` datasets/readers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103)
* Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases.
  * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges.
  * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py`
* Numerous bug fixes
* Currently testing for imminent PyPi 0.6.x release
* LeViT pretraining of larger models still a WIP, they don't train well / easily without distillation. Time to add distill support (finally)?
* ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ...

### May 13, 2022
* Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript.
* Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects.
* More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program)
  * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool
  * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake)
* Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using in a pre DeiT 'small' model for first ViT impl back in 2020)
* Add ViT relative position support for switching btw existing impl and some additions in official Swin-V2 impl for future trials
* Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg)

### May 2, 2022
* Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`)
  * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool
  * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool
* Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`)
* `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae).

### April 22, 2022
* `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/).
* Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress.
  * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288
  * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288

### March 23, 2022
* Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795)
* `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs.

### March 21, 2022
* Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required.
* Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights)
  * `regnety_040` - 82.3 @ 224, 82.96 @ 288
  * `regnety_064` - 83.0 @ 224, 83.65 @ 288
  * `regnety_080` - 83.17 @ 224, 83.86 @ 288
  * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act)
  * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act)
  * `regnetz_040` - 83.67 @ 256, 84.25 @ 320
  * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head)
  * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm)
  * `resnetv2_50d_evos` - 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS)
  * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS)
  * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS)
  * `xception41p` - 82 @ 299 (timm pre-act)
  * `xception65` - 83.17 @ 299
  * `xception65p` - 83.14 @ 299 (timm pre-act)
  * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288
  * `seresnext101_32x8d` - 83.57 @ 224, 84.270 @ 288
  * `resnetrs200` - 83.85 @ 256, 84.44 @ 320
* HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon)
* SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks.
* Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2
* MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets
* PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer
* VOLO models w/ weights adapted from https://github.com/sail-sg/volo
* Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc
* Enhance support for alternate norm + act ('NormAct') layers added to a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception
* Grouped conv support added to EfficientNet family
* Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
* Gradient checkpointing support added to many models
* `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head` (see the sketch below)
* All vision transformer and vision MLP models updated to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head`
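A minimal sketch of the `forward_features` / `forward_head` split described above (the model name is just an example; shapes assume default configs):

```python
import torch
import timm

m = timm.create_model('resnet50', pretrained=True)
m.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    feats = m.forward_features(x)   # unpooled features, e.g. (1, 2048, 7, 7)
    logits = m.forward_head(feats)  # pooling + classifier -> (1, 1000)
```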
### Feb 2, 2022
* [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
* I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in next week or so.
  * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs!
  * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable.

### Jan 14, 2022
* Version 0.5.4 w/ release to be pushed to pypi. It's been a while since last pypi update and riskier changes will be merged to main branch soon....
* Add ConvNeXt models w/ weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features
* Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way...
  * `mnasnet_small` - 65.6 top-1
  * `mobilenetv2_050` - 65.9
  * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1
  * `semnasnet_075` - 73
  * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0
* TinyNet models added by [rsomani95](https://github.com/rsomani95)
* LCNet added via MobileNetV3 architecture

### Jan 5, 2023
* ConvNeXt-V2 models and weights added to existing `convnext.py`
  * Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
  * Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)

### Dec 23, 2022 🎄☃
* Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
  * NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
* Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
* More model pretrained tag and adjustments, some model names changed (working on deprecation translations, consider main branch DEV branch right now, use 0.6.x for stable use)
* More ImageNet-12k (subset of 22k) pretrain models popping up:
  * `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
  * `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
  * `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
  * `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288

### Dec 8, 2022
* Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
  * original source: https://github.com/baaivision/EVA

| model | top1 | param_count | gmac | macts | hub |
|:------|-----:|------------:|-----:|------:|:----|
| eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |

### Dec 6, 2022
* Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
  * original source: https://github.com/baaivision/EVA
  * paper: https://arxiv.org/abs/2211.07636

| model | top1 | param_count | gmac | macts | hub |
|:------|-----:|------------:|-----:|------:|:----|
| eva_giant_patch14_560.m30m_ft_in22k_in1k | 89.8 | 1014.4 | 1906.8 | 2577.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_336.m30m_ft_in22k_in1k | 89.6 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_336.clip_ft_in1k | 89.4 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_224.clip_ft_in1k | 89.1 | 1012.6 | 267.2 | 192.6 | [link](https://huggingface.co/BAAI/EVA) |

### Dec 5, 2022
* Pre-release (`0.8.0dev0`) of multi-weight support (`model_arch.pretrained_tag`). Install with `pip install --pre timm`
  * vision_transformer, maxvit, convnext are the first three model impls w/ support
  * model names are changing with this (previous _21k, etc. fn will merge), still sorting out deprecation handling
  * bugs are likely, but I need feedback so please try it out
  * if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x)
* Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument
* Inference script allows more control over output, select k for top-class index + prob json, csv or parquet output
* Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models (see the naming sketch after the tables below)

| model | top1 | param_count | gmac | macts | hub |
|:------|-----:|------------:|-----:|------:|:----|
| vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) |
| vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) |
| vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) |
| vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) |
| vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) |
| vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) |
| vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) |
| vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) |
| vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) |
| vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) |
| vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) |
| vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) |
| vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) |
| vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) |
| vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) |
| vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) |

* Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit
  * There were larger than expected drops for the upscaled 384/512 in21k fine-tune weights, possible detail missing, but the 21k FT did seem sensitive to small preprocessing

| model | top1 | param_count | gmac | macts | hub |
|:------|-----:|------------:|-----:|------:|:----|
| maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |
| maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |
| maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |
| maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |
| maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |
| maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |
| maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |
| maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |
| maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |
| maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |
| maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |
| maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | [link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |
| maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |
| maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |
| maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |

### Oct 15, 2022
* Train and validation script enhancements
  * Non-GPU (ie CPU) device support
  * SLURM compatibility for train script
  * HF datasets support (via ReaderHfds)
  * TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate)
  * in_chans !=3 support for scripts / loader
  * Adan optimizer
  * Can enable per-step LR scheduling via args
* Dataset 'parsers' renamed to 'readers', more descriptive of purpose
* AMP args changed, APEX via `--amp-impl apex`, bfloat16 supported via `--amp-dtype bfloat16`
* main branch switched to 0.7.x version, 0.6.x forked for stable release of weight only adds
* master -> main branch rename

### Oct 10, 2022
* More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments:
  * `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm)
  * `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN)
  * `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G)
  * `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN)
  * `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T)
* NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt since my impl was created independently of theirs and has a few small differences + the whole TF same padding fun.
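A minimal sketch of the `model_arch.pretrained_tag` naming from the Dec 5 notes above (the tag is taken verbatim from the CLIP fine-tune table; the plain architecture name still selects a default weight):

```python
import timm

# '<arch>.<pretrained_tag>' picks a specific pretrained weight for an arch
m = timm.create_model('vit_base_patch16_clip_224.openai_ft_in1k', pretrained=True)
```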
### Sept 23, 2022
* LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier)
  * vit_base_patch32_224_clip_laion2b
  * vit_large_patch14_224_clip_laion2b
  * vit_huge_patch14_224_clip_laion2b
  * vit_giant_patch14_224_clip_laion2b

### Sept 7, 2022
* Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future
* Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2
* Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants:
  * `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T)
  * `maxvit_tiny_rw_224` - 83.5 @ 224 (G)
  * `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T)
* A few more weights & model defs added:
  * `darknetaa53` - 79.8 @ 256, 80.5 @ 288
  * `convnext_nano` - 80.8 @ 224, 81.5 @ 288
  * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288
  * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288
  * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288
  * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288
  * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320
* `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program!
* Add output_stride=8 and 16 support to ConvNeXt (dilation)
* Fixed deit3 models not being able to resize pos_emb
* Version 0.6.7 PyPi release (w/ above bug fixes and new weights since 0.6.5)

### July 8, 2022
More models, more fixes
* Official research models (w/ weights) added:
  * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt)
  * MobileViT-V2 from (https://github.com/apple/ml-cvnets)
  * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit)
* My own models:
  * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14)
  * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs.
  * More relative position vit fiddling. Two `srelpos` (shared relative position) models trained, and a medium w/ class token.
  * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights.
* My own model weight results (all ImageNet-1k training)
  * `resnet10t` - 66.5 @ 176, 68.3 @ 224
  * `resnet14t` - 71.3 @ 176, 72.3 @ 224
  * `resnetaa50` - 80.6 @ 224, 81.6 @ 288
  * `darknet53` - 80.0 @ 256, 80.5 @ 288
  * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288
  * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288
  * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288
  * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288
  * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320
  * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320
  * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320
  * `edgenext_small_rw` - 79.6 @ 224, 80.4 @ 320
* `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs.
* Hugging Face Hub support fixes verified, demo notebook TBA
* Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation.
* Add support to change image extensions scanned by `timm` datasets/parsers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103)
* Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases.
  * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges.
  * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py`
* Numerous bug fixes
* Currently testing for imminent PyPi 0.6.x release
* LeViT pretraining of larger models still a WIP; they don't train well / easily without distillation. Time to add distill support (finally)?
* ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ...

### May 13, 2022
* Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript.
* Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects.
* More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program)
  * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool
  * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake)
* Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using it in a pre-DeiT 'small' model for the first ViT impl back in 2020)
* Add ViT relative position support for switching between the existing impl and some additions in the official Swin-V2 impl for future trials
* Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg)

### May 2, 2022
* Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`)
  * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool
  * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool
  * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool
* Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`)
* `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae).

### April 22, 2022
* `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/).
* Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress.
  * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288
  * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288

### March 23, 2022
* Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795)
* `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs.

### March 21, 2022
* Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required.
* Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights)
  * `regnety_040` - 82.3 @ 224, 82.96 @ 288
  * `regnety_064` - 83.0 @ 224, 83.65 @ 288
  * `regnety_080` - 83.17 @ 224, 83.86 @ 288
  * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act)
  * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act)
  * `regnetz_040` - 83.67 @ 256, 84.25 @ 320
  * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head)
  * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm)
  * `resnetv2_50d_evos` - 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS)
  * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS)
  * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS)
  * `xception41p` - 82 @ 299 (timm pre-act)
  * `xception65` - 83.17 @ 299
  * `xception65p` - 83.14 @ 299 (timm pre-act)
  * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288
  * `seresnext101_32x8d` - 83.57 @ 224, 84.27 @ 288
  * `resnetrs200` - 83.85 @ 256, 84.44 @ 320
* HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon)
* SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks.
* Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2
* MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets
* PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer
* VOLO models w/ weights adapted from https://github.com/sail-sg/volo
* Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc
* Enhanced support for alternate norm + act ('NormAct') layers in a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception
* Grouped conv support added to EfficientNet family
* Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
* Gradient checkpointing support added to many models
* `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head`
* All vision transformer and vision MLP models updated to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models; token selection or pooling now applied in `forward_head`

### Feb 2, 2022
* [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run-through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
* I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in next week or so.
  * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs!
  * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable.

### Jan 14, 2022
* Version 0.5.4 w/ release to be pushed to pypi.
It's been a while since the last pypi update and riskier changes will be merged to the main branch soon....
* Add ConvNeXt models w/ weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features
* Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way...
  * `mnasnet_small` - 65.6 top-1
  * `mobilenetv2_050` - 65.9
  * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1
  * `semnasnet_075` - 73
  * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0
* TinyNet models added by [rsomani95](https://github.com/rsomani95)
* LCNet added via MobileNetV3 architecture
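As with any other weights in these notes, the small mobile-oriented models above are available via `timm.create_model`; a quick sketch using `lcnet_100` (any of the listed names should work the same way):

```python
import timm

# One of the small mobile-optimized models listed in the notes above.
model = timm.create_model('lcnet_100', pretrained=True)

# Sanity-check the parameter count (roughly ~3M for lcnet_100).
num_params = sum(p.numel() for p in model.parameters())
print(f'{num_params / 1e6:.2f}M params')
```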
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/docs/feature_extraction.md
# Feature Extraction All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification. ## Penultimate Layer Features (Pre-Classifier Features) The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features. ### Unpooled There are three ways to obtain unpooled features. Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling for networks. If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network. #### forward_features() ```python hl_lines="3 6" import torch import timm m = timm.create_model('xception41', pretrained=True) o = m(torch.randn(2, 3, 299, 299)) print(f'Original shape: {o.shape}') o = m.forward_features(torch.randn(2, 3, 299, 299)) print(f'Unpooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Unpooled shape: torch.Size([2, 2048, 10, 10]) ``` #### Create with no classifier and pooling ```python hl_lines="3" import torch import timm m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='') o = m(torch.randn(2, 3, 224, 224)) print(f'Unpooled shape: {o.shape}') ``` Output: ```text Unpooled shape: torch.Size([2, 2048, 7, 7]) ``` #### Remove it later ```python hl_lines="3 6" import torch import timm m = timm.create_model('densenet121', pretrained=True) o = m(torch.randn(2, 3, 224, 224)) print(f'Original shape: {o.shape}') m.reset_classifier(0, '') o = m(torch.randn(2, 3, 224, 224)) print(f'Unpooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Unpooled shape: torch.Size([2, 1024, 7, 7]) ``` ### Pooled To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact. #### Create with no classifier ```python hl_lines="3" import torch import timm m = timm.create_model('resnet50', pretrained=True, num_classes=0) o = m(torch.randn(2, 3, 224, 224)) print(f'Pooled shape: {o.shape}') ``` Output: ```text Pooled shape: torch.Size([2, 2048]) ``` #### Remove it later ```python hl_lines="3 6" import torch import timm m = timm.create_model('ese_vovnet19b_dw', pretrained=True) o = m(torch.randn(2, 3, 224, 224)) print(f'Original shape: {o.shape}') m.reset_classifier(0) o = m(torch.randn(2, 3, 224, 224)) print(f'Pooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Pooled shape: torch.Size([2, 1024]) ``` ## Multi-scale Feature Maps (Feature Pyramid) Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library. `timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels. 
A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default 5 strides will be output from most models (not all have that many), with the first starting at 2 (some start at 1 or 4). ### Create a feature map extraction model ```python hl_lines="3" import torch import timm m = timm.create_model('resnest26d', features_only=True, pretrained=True) o = m(torch.randn(2, 3, 224, 224)) for x in o: print(x.shape) ``` Output: ```text torch.Size([2, 64, 112, 112]) torch.Size([2, 256, 56, 56]) torch.Size([2, 512, 28, 28]) torch.Size([2, 1024, 14, 14]) torch.Size([2, 2048, 7, 7]) ``` ### Query the feature information After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points. ```python hl_lines="3 4" import torch import timm m = timm.create_model('regnety_032', features_only=True, pretrained=True) print(f'Feature channels: {m.feature_info.channels()}') o = m(torch.randn(2, 3, 224, 224)) for x in o: print(x.shape) ``` Output: ```text Feature channels: [32, 72, 216, 576, 1512] torch.Size([2, 32, 112, 112]) torch.Size([2, 72, 56, 56]) torch.Size([2, 216, 28, 28]) torch.Size([2, 576, 14, 14]) torch.Size([2, 1512, 7, 7]) ``` ### Select specific feature levels or limit the stride There are two additional creation arguments impacting the output features. * `out_indices` selects which indices to output * `output_stride` limits the feature output stride of the network (also works in classification mode BTW) `out_indices` is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check feature_info to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most models, index 0 is the stride 2 features, and index 4 is stride 32. `output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward, some networks only support `output_stride=32`. ```python hl_lines="3 4 5" import torch import timm m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True) print(f'Feature channels: {m.feature_info.channels()}') print(f'Feature reduction: {m.feature_info.reduction()}') o = m(torch.randn(2, 3, 320, 320)) for x in o: print(x.shape) ``` Output: ```text Feature channels: [512, 2048] Feature reduction: [8, 8] torch.Size([2, 512, 40, 40]) torch.Size([2, 2048, 40, 40]) ```
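As a complement to the Pooled section above: if you prefer calling `forward_features()` and applying your own pooling rather than relying on the built-in head, a minimal sketch using standard PyTorch adaptive pooling (shapes match the resnet50 example earlier in this doc):

```python
import torch
import torch.nn.functional as F
import timm

m = timm.create_model('resnet50', pretrained=True)
feats = m.forward_features(torch.randn(2, 3, 224, 224))  # torch.Size([2, 2048, 7, 7])

# Global average pool + flatten, mirroring what the built-in 'avg' pooling does.
pooled = F.adaptive_avg_pool2d(feats, 1).flatten(1)
print(pooled.shape)  # torch.Size([2, 2048])
```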
0
hf_public_repos/pytorch-image-models
hf_public_repos/pytorch-image-models/docs/models.md
# Model Summaries The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below. Most included models have pretrained weights. The weights are either: 1. from their original sources 2. ported by myself from their original impl in a different framework (e.g. Tensorflow models) 3. trained from scratch using the included training script The validation results for the pretrained weights are [here](results.md) A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm). ## Big Transfer ResNetV2 (BiT) [[resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)] * Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370 * Reference code: https://github.com/google-research/big_transfer ## Cross-Stage Partial Networks [[cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)] * Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 * Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks ## DenseNet [[densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)] * Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## DLA [[dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)] * Paper: https://arxiv.org/abs/1707.06484 * Code: https://github.com/ucbdrive/dla ## Dual-Path Networks [[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)] * Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629 * My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained * Reference code: https://github.com/cypw/DPNs ## GPU-Efficient Networks [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] * Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 * Reference code: https://github.com/idstcv/GPU-Efficient-Networks ## HRNet [[hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)] * Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919 * Code: https://github.com/HRNet/HRNet-Image-Classification ## Inception-V3 [[inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)] * Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## Inception-V4 [[inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)] * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets ## Inception-ResNet-V2 [[inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)] * Paper: `Inception-v4, 
Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets ## NASNet-A [[nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)] * Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## PNasNet-5 [[pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)] * Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## EfficientNet [[efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)] * Papers: * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html * MixNet - https://arxiv.org/abs/1907.09595 * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 * MobileNet-V2 - https://arxiv.org/abs/1801.04381 * FBNet-C - https://arxiv.org/abs/1812.03443 * Single-Path NAS - https://arxiv.org/abs/1904.02877 * My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch * Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## MobileNet-V3 [[mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)] * Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet ## RegNet [[regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)] * Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 * Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py ## RepVGG [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] * Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 * Reference code: https://github.com/DingXiaoH/RepVGG ## ResNet, ResNeXt [[resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)] * ResNet (V1B) * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * ResNeXt * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * 'Bag of Tricks' / Gluon C, D, E, S ResNet variants * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187 * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py * Instagram pretrained / ImageNet tuned ResNeXt101 * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932 * 
Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546 * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Squeeze-and-Excitation Networks * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated * ECAResNet (ECA-Net) * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4 * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet ## Res2Net [[res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)] * Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 * Code: https://github.com/gasvn/Res2Net ## ResNeSt [[resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)] * Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 * Code: https://github.com/zhanghang1989/ResNeSt ## ReXNet [[rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)] * Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992 * Code: https://github.com/clovaai/rexnet ## Selective-Kernel Networks [[sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)] * Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586 * Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn ## SelecSLS [[selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)] * Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837 * Code: https://github.com/mehtadushy/SelecSLS-Pytorch ## Squeeze-and-Excitation Networks [[senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)] NOTE: I am deprecating this version of the networks, the new ones are part of `resnet.py` * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: https://github.com/Cadene/pretrained-models.pytorch ## TResNet [[tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)] * Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630 * Code: https://github.com/mrT23/TResNet ## VGG [[vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)] * Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf * Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py ## Vision Transformer [[vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)] * Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 * Reference code and pretrained weights: https://github.com/google-research/vision_transformer ## VovNet V2 and V1 
[[vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)] * Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 * Reference code: https://github.com/youngwanLEE/vovnet-detectron2 ## Xception [[xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)] * Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357 * Code: https://github.com/Cadene/pretrained-models.pytorch ## Xception (Modified Aligned, Gluon) [[gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)] * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/ ## Xception (Modified Aligned, TF) [[aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)] * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
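To browse the architectures above from code rather than this page, `timm` provides `list_models` with wildcard filtering; the exact names returned depend on the installed `timm` version:

```python
import timm

# All ResNeXt-style model names registered in the installed timm version.
print(timm.list_models('*resnext*'))

# Only the variants that have pretrained weights available.
print(timm.list_models('*resnext*', pretrained=True))
```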
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/csp-resnet.md
# CSP-ResNet **CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('cspresnet50', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspresnet50`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
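For a concrete picture of what "write a training loop" means, here is a minimal, illustrative loop around a finetune-configured model (the dataset below is a random-tensor placeholder; substitute your own `DataLoader`, class count, and hyper-parameters):

```python
import torch
import timm
from torch.utils.data import DataLoader, TensorDataset

NUM_FINETUNE_CLASSES = 10  # placeholder
# Placeholder data at cspresnet50's 256x256 training resolution; use your own dataset.
train_loader = DataLoader(
    TensorDataset(torch.randn(8, 3, 256, 256),
                  torch.randint(0, NUM_FINETUNE_CLASSES, (8,))),
    batch_size=4)

model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = torch.nn.CrossEntropyLoss()

model.train()
for images, targets in train_loader:
    optimizer.zero_grad()
    loss = criterion(model(images), targets)
    loss.backward()
    optimizer.step()
```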
## Citation ```BibTeX @misc{wang2019cspnet, title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, year={2019}, eprint={1911.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP ResNet Paper: Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance Models: - Name: cspresnet50 In Collection: CSP ResNet Metadata: FLOPs: 5924992000 Parameters: 21620000 File Size: 86679303 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Label Smoothing - Polynomial Learning Rate Decay - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: cspresnet50 LR: 0.1 Layers: 50 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Weight Decay: 0.005 Interpolation: bilinear Training Steps: 8000000 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.57% Top 5 Accuracy: 94.71% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/tf-efficientnet-lite.md
# (Tensorflow) EfficientNet Lite

**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way.

The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.

The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2).

EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation).

The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('tf_efficientnet_lite0', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: TF EfficientNet Lite Paper: Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for Models: - Name: tf_efficientnet_lite0 In Collection: TF EfficientNet Lite Metadata: FLOPs: 488052032 Parameters: 4650000 File Size: 18820223 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - RELU6 Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_lite0 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1596 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.83% Top 5 Accuracy: 92.17% - Name: tf_efficientnet_lite1 In Collection: TF EfficientNet Lite Metadata: FLOPs: 773639520 Parameters: 5420000 File Size: 21939331 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - RELU6 Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_lite1 Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1607 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.67% Top 5 Accuracy: 93.24% - Name: tf_efficientnet_lite2 In Collection: TF EfficientNet Lite Metadata: FLOPs: 1068494432 Parameters: 6090000 File Size: 24658687 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - RELU6 Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_lite2 Crop Pct: '0.89' Image Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1618 Weights: 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.48% Top 5 Accuracy: 93.75% - Name: tf_efficientnet_lite3 In Collection: TF EfficientNet Lite Metadata: FLOPs: 2011534304 Parameters: 8199999 File Size: 33161413 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - RELU6 Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_lite3 Crop Pct: '0.904' Image Size: '300' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1629 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.83% Top 5 Accuracy: 94.91% - Name: tf_efficientnet_lite4 In Collection: TF EfficientNet Lite Metadata: FLOPs: 5164802912 Parameters: 13010000 File Size: 52558819 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - RELU6 Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_lite4 Crop Pct: '0.92' Image Size: '380' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1640 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.54% Top 5 Accuracy: 95.66% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/res2next.md
# Res2NeXt **Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('res2next50', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `res2next50`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('res2next50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
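For intuition on the "hierarchical residual-like connections" described above, here is a toy sketch of the core Res2Net-style channel split (a simplified illustration of the idea, not timm's actual implementation):

```python
import torch
import torch.nn as nn

class Res2NetSplit(nn.Module):
    """Channels are split into `scales` groups; each 3x3 conv sees the sum of
    its own split and the previous split's output, widening receptive fields."""
    def __init__(self, channels: int, scales: int = 4):
        super().__init__()
        assert channels % scales == 0
        self.scales = scales
        width = channels // scales
        # One 3x3 conv per split except the first, which passes through.
        self.convs = nn.ModuleList(
            [nn.Conv2d(width, width, 3, padding=1) for _ in range(scales - 1)])

    def forward(self, x):
        splits = torch.chunk(x, self.scales, dim=1)
        out = [splits[0]]
        prev = None
        for i, conv in enumerate(self.convs):
            y = splits[i + 1] if prev is None else splits[i + 1] + prev
            prev = conv(y)
            out.append(prev)
        return torch.cat(out, dim=1)

x = torch.randn(2, 64, 56, 56)
print(Res2NetSplit(64)(x).shape)  # torch.Size([2, 64, 56, 56])
```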
## Citation ```BibTeX @article{Gao_2021, title={Res2Net: A New Multi-Scale Backbone Architecture}, volume={43}, ISSN={1939-3539}, url={http://dx.doi.org/10.1109/TPAMI.2019.2938758}, DOI={10.1109/tpami.2019.2938758}, number={2}, journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, year={2021}, month={Feb}, pages={652–662} } ``` <!-- Type: model-index Collections: - Name: Res2NeXt Paper: Title: 'Res2Net: A New Multi-scale Backbone Architecture' URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone Models: - Name: res2next50 In Collection: Res2NeXt Metadata: FLOPs: 5396798208 Parameters: 24670000 File Size: 99019592 Architecture: - Batch Normalization - Convolution - Global Average Pooling - ReLU - Res2NeXt Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 4x Titan Xp GPUs ID: res2next50 LR: 0.1 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.24% Top 5 Accuracy: 93.91% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/legacy-se-resnext.md
# (Legacy) SE-ResNeXt **SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Legacy SE ResNeXt Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: legacy_seresnext101_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 10287698672 Parameters: 48960000 File Size: 196466866 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnext101_32x4d LR: 0.6 Epochs: 100 Layers: 101 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L462 Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.23% Top 5 Accuracy: 95.02% - Name: legacy_seresnext26_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 3187342304 Parameters: 16790000 File Size: 67346327 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnext26_32x4d LR: 0.6 Epochs: 100 Layers: 26 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L448 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.11% Top 5 Accuracy: 93.31% - Name: legacy_seresnext50_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 5459954352 Parameters: 27560000 File Size: 110559176 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnext50_32x4d LR: 0.6 Epochs: 100 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L455 Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.08% Top 5 Accuracy: 94.43% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/xception.md
# Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('xception', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
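To make the depthwise separable convolution idea concrete, here is a minimal PyTorch sketch (an illustration of the concept, not timm's exact module): a per-channel (depthwise) 3x3 conv followed by a 1x1 (pointwise) conv that mixes channels.

```python
import torch
import torch.nn as nn

class DepthwiseSeparableConv(nn.Module):
    """Depthwise 3x3 conv (groups=in_ch) followed by a pointwise 1x1 conv."""
    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, 3, padding=1, groups=in_ch)
        self.pointwise = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

x = torch.randn(1, 32, 56, 56)
print(DepthwiseSeparableConv(32, 64)(x).shape)  # torch.Size([1, 64, 56, 56])
```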
## Citation

```BibTeX
@misc{chollet2017xception,
      title={Xception: Deep Learning with Depthwise Separable Convolutions},
      author={François Chollet},
      year={2017},
      eprint={1610.02357},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: Xception
  Paper:
    Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
    URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
Models:
- Name: xception
  In Collection: Xception
  Metadata:
    FLOPs: 10600506792
    Parameters: 22860000
    File Size: 91675053
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception
    Crop Pct: '0.897'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception.py#L229
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.05%
      Top 5 Accuracy: 94.4%
- Name: xception41
  In Collection: Xception
  Metadata:
    FLOPs: 11681983232
    Parameters: 26970000
    File Size: 108422028
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception41
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L181
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.54%
      Top 5 Accuracy: 94.28%
- Name: xception65
  In Collection: Xception
  Metadata:
    FLOPs: 17585702144
    Parameters: 39920000
    File Size: 160536780
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception65
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L200
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.55%
      Top 5 Accuracy: 94.66%
- Name: xception71
  In Collection: Xception
  Metadata:
    FLOPs: 22817346560
    Parameters: 42340000
    File Size: 170295556
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception71
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L219
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.88%
      Top 5 Accuracy: 94.93%
-->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/gloun-seresnext.md
# (Gluon) SE-ResNeXt **SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `gluon_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
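To make the channel recalibration idea above concrete, here is a minimal, self-contained sketch of a squeeze-and-excitation block in plain PyTorch. It is illustrative only and is not timm's actual SE module; the reduction ratio of 16 is the common default from the paper.

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Minimal squeeze-and-excitation block (illustrative sketch, not timm's module)."""
    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(channels, channels // reduction)
        self.fc2 = nn.Linear(channels // reduction, channels)

    def forward(self, x):
        b, c, _, _ = x.shape
        s = x.mean(dim=(2, 3))          # squeeze: global average pool -> (B, C)
        s = torch.relu(self.fc1(s))     # excitation: bottleneck MLP
        s = torch.sigmoid(self.fc2(s))  # per-channel gates in [0, 1]
        return x * s.view(b, c, 1, 1)   # recalibrate each channel

x = torch.randn(1, 64, 56, 56)
print(SEBlock(64)(x).shape)  # torch.Size([1, 64, 56, 56])
```

In SE-ResNeXt, a block like this sits after each ResNeXt bottleneck, so uninformative channels can be down-weighted before the residual addition.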
## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Gloun SEResNeXt Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: gluon_seresnext101_32x4d In Collection: Gloun SEResNeXt Metadata: FLOPs: 10302923504 Parameters: 48960000 File Size: 196505510 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: gluon_seresnext101_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L219 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.87% Top 5 Accuracy: 95.29% - Name: gluon_seresnext101_64x4d In Collection: Gloun SEResNeXt Metadata: FLOPs: 19958950640 Parameters: 88230000 File Size: 353875948 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: gluon_seresnext101_64x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L229 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.88% Top 5 Accuracy: 95.31% - Name: gluon_seresnext50_32x4d In Collection: Gloun SEResNeXt Metadata: FLOPs: 5475179184 Parameters: 27560000 File Size: 110578827 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: gluon_seresnext50_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L209 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.92% Top 5 Accuracy: 94.82% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/dla.md
# Deep Layer Aggregation Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation (DLA): iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks. IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('dla102', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `dla102`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
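Since DLA is explicitly about aggregating features across stages and scales, it can be instructive to look at the intermediate feature maps rather than only the final logits. The sketch below uses timm's generic `features_only=True` mechanism (the same feature extraction API linked above); the number of stages returned and their channel counts depend on the variant.

```python
import torch
import timm

# Build dla102 as a multi-scale feature extractor instead of a classifier.
model = timm.create_model('dla102', pretrained=True, features_only=True)
model.eval()

with torch.no_grad():
    features = model(torch.randn(1, 3, 224, 224))

# One tensor per extracted stage, at progressively coarser spatial resolution.
for i, f in enumerate(features):
    print(f'stage {i}: {tuple(f.shape)}')
```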
## Citation ```BibTeX @misc{yu2019deep, title={Deep Layer Aggregation}, author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell}, year={2019}, eprint={1707.06484}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: DLA Paper: Title: Deep Layer Aggregation URL: https://paperswithcode.com/paper/deep-layer-aggregation Models: - Name: dla102 In Collection: DLA Metadata: FLOPs: 7192952808 Parameters: 33270000 File Size: 135290579 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: dla102 LR: 0.1 Epochs: 120 Layers: 102 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410 Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.03% Top 5 Accuracy: 93.95% - Name: dla102x In Collection: DLA Metadata: FLOPs: 5886821352 Parameters: 26310000 File Size: 107552695 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: dla102x LR: 0.1 Epochs: 120 Layers: 102 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418 Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.51% Top 5 Accuracy: 94.23% - Name: dla102x2 In Collection: DLA Metadata: FLOPs: 9343847400 Parameters: 41280000 File Size: 167645295 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: dla102x2 LR: 0.1 Epochs: 120 Layers: 102 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426 Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.44% Top 5 Accuracy: 94.65% - Name: dla169 In Collection: DLA Metadata: FLOPs: 11598004200 Parameters: 53390000 File Size: 216547113 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay 
Training Data: - ImageNet Training Resources: 8x GPUs ID: dla169 LR: 0.1 Epochs: 120 Layers: 169 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434 Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.69% Top 5 Accuracy: 94.33% - Name: dla34 In Collection: DLA Metadata: FLOPs: 3070105576 Parameters: 15740000 File Size: 63228658 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla34 LR: 0.1 Epochs: 120 Layers: 32 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362 Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.62% Top 5 Accuracy: 92.06% - Name: dla46_c In Collection: DLA Metadata: FLOPs: 583277288 Parameters: 1300000 File Size: 5307963 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla46_c LR: 0.1 Epochs: 120 Layers: 46 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369 Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 64.87% Top 5 Accuracy: 86.29% - Name: dla46x_c In Collection: DLA Metadata: FLOPs: 544052200 Parameters: 1070000 File Size: 4387641 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla46x_c LR: 0.1 Epochs: 120 Layers: 46 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378 Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 65.98% Top 5 Accuracy: 86.99% - Name: dla60 In Collection: DLA Metadata: FLOPs: 4256251880 Parameters: 22040000 File Size: 89560235 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with 
Momentum - Weight Decay Training Data: - ImageNet ID: dla60 LR: 0.1 Epochs: 120 Layers: 60 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394 Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.04% Top 5 Accuracy: 93.32% - Name: dla60_res2net In Collection: DLA Metadata: FLOPs: 4147578504 Parameters: 20850000 File Size: 84886593 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla60_res2net Layers: 60 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.46% Top 5 Accuracy: 94.21% - Name: dla60_res2next In Collection: DLA Metadata: FLOPs: 3485335272 Parameters: 17030000 File Size: 69639245 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla60_res2next Layers: 60 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.44% Top 5 Accuracy: 94.16% - Name: dla60x In Collection: DLA Metadata: FLOPs: 3544204264 Parameters: 17350000 File Size: 70883139 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: dla60x LR: 0.1 Epochs: 120 Layers: 60 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402 Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.25% Top 5 Accuracy: 94.02% - Name: dla60x_c In Collection: DLA Metadata: FLOPs: 593325032 Parameters: 1320000 File Size: 5454396 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - DLA Bottleneck Residual Block - DLA Residual Block - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with 
Momentum - Weight Decay Training Data: - ImageNet ID: dla60x_c LR: 0.1 Epochs: 120 Layers: 60 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386 Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 67.91% Top 5 Accuracy: 88.42% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/skresnext.md
# SK-ResNeXt **SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNeXt are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('skresnext50_32x4d', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
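For intuition, a selective kernel unit runs the same input through branches with different kernel sizes and lets a small attention head decide, per channel, how to mix them. The two-branch sketch below is a simplified illustration of that mechanism, not timm's actual `SelectiveKernel` module; the branch kernel sizes (3 and 5) and the reduction ratio are assumptions for the example.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelectiveKernelSketch(nn.Module):
    """Simplified two-branch selective kernel unit (illustration only)."""
    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        # Two depthwise branches with different receptive fields.
        self.conv3 = nn.Conv2d(channels, channels, 3, padding=1, groups=channels)
        self.conv5 = nn.Conv2d(channels, channels, 5, padding=2, groups=channels)
        d = max(channels // reduction, 8)
        self.fc = nn.Linear(channels, d)          # fuse -> compact descriptor
        self.select = nn.Linear(d, channels * 2)  # per-branch, per-channel logits

    def forward(self, x):
        b, c = x.shape[:2]
        branches = torch.stack([self.conv3(x), self.conv5(x)], dim=1)  # (B, 2, C, H, W)
        u = branches.sum(dim=1).mean(dim=(2, 3))                       # fuse + squeeze -> (B, C)
        z = F.relu(self.fc(u))
        attn = self.select(z).view(b, 2, c).softmax(dim=1)             # soft selection across branches
        return (branches * attn.view(b, 2, c, 1, 1)).sum(dim=1)        # attention-weighted mix

x = torch.randn(1, 64, 32, 32)
print(SelectiveKernelSketch(64)(x).shape)  # torch.Size([1, 64, 32, 32])
```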
## Citation ```BibTeX @misc{li2019selective, title={Selective Kernel Networks}, author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, year={2019}, eprint={1903.06586}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: SKResNeXt Paper: Title: Selective Kernel Networks URL: https://paperswithcode.com/paper/selective-kernel-networks Models: - Name: skresnext50_32x4d In Collection: SKResNeXt Metadata: FLOPs: 5739845824 Parameters: 27480000 File Size: 110340975 Architecture: - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnext50_32x4d LR: 0.1 Epochs: 100 Layers: 50 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.15% Top 5 Accuracy: 94.64% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/mixnet.md
# MixNet **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('mixnet_l', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `mixnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mixconv, title={MixConv: Mixed Depthwise Convolutional Kernels}, author={Mingxing Tan and Quoc V. 
Le}, year={2019}, eprint={1907.09595}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: MixNet Paper: Title: 'MixConv: Mixed Depthwise Convolutional Kernels' URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels Models: - Name: mixnet_l In Collection: MixNet Metadata: FLOPs: 738671316 Parameters: 7330000 File Size: 29608232 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: mixnet_l Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1669 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.98% Top 5 Accuracy: 94.18% - Name: mixnet_m In Collection: MixNet Metadata: FLOPs: 454543374 Parameters: 5010000 File Size: 20298347 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: mixnet_m Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1660 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.27% Top 5 Accuracy: 93.42% - Name: mixnet_s In Collection: MixNet Metadata: FLOPs: 321264910 Parameters: 4130000 File Size: 16727982 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: mixnet_s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1651 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.99% Top 5 Accuracy: 92.79% - Name: mixnet_xl In Collection: MixNet Metadata: FLOPs: 1195880424 Parameters: 11900000 File Size: 48001170 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: mixnet_xl Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1678 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.47% Top 5 Accuracy: 94.93% -->
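As a postscript to the MixConv description at the top of this page: a mixed depthwise convolution splits the channels into groups and applies a different depthwise kernel size to each group, so a single layer mixes several receptive fields. The sketch below is illustrative only and does not mirror timm's own mixed convolution layer exactly; the kernel sizes (3, 5, 7) are assumptions for the example.

```python
import torch
import torch.nn as nn

class MixConvSketch(nn.Module):
    """Minimal mixed depthwise conv: one kernel size per channel group (illustrative)."""
    def __init__(self, channels: int, kernel_sizes=(3, 5, 7)):
        super().__init__()
        splits = [channels // len(kernel_sizes)] * len(kernel_sizes)
        splits[0] += channels - sum(splits)               # absorb any remainder channels
        self.splits = splits
        self.convs = nn.ModuleList(
            nn.Conv2d(c, c, k, padding=k // 2, groups=c)  # depthwise conv per group
            for c, k in zip(splits, kernel_sizes)
        )

    def forward(self, x):
        parts = torch.split(x, self.splits, dim=1)
        return torch.cat([conv(p) for conv, p in zip(self.convs, parts)], dim=1)

x = torch.randn(1, 48, 32, 32)
print(MixConvSketch(48)(x).shape)  # torch.Size([1, 48, 32, 32])
```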
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/vision-transformer.md
# Vision Transformer (ViT) The **Vision Transformer** is a model for image classification that employs a Transformer-like architecture over patches of the image. This includes the use of [Multi-Head Attention](https://paperswithcode.com/method/multi-head-attention), [Scaled Dot-Product Attention](https://paperswithcode.com/method/scaled) and other architectural features seen in the [Transformer](https://paperswithcode.com/method/transformer) architecture traditionally used for NLP. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('vit_base_patch16_224', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `vit_base_patch16_224`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
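The "patches" in the description above come from a patch embedding step: the image is cut into non-overlapping squares and each square is linearly projected to a token. Below is a minimal sketch of that step, which is commonly implemented as a strided convolution (the sizes assume a ViT-Base-like configuration: 224px input, 16px patches, 768-dim embeddings).

```python
import torch
import torch.nn as nn

# A 224x224 image split into 16x16 patches yields (224 / 16) ** 2 = 196 tokens.
img_size, patch_size, embed_dim = 224, 16, 768

# Patch embedding as a convolution whose stride equals its kernel size.
patch_embed = nn.Conv2d(3, embed_dim, kernel_size=patch_size, stride=patch_size)

x = torch.randn(1, 3, img_size, img_size)
tokens = patch_embed(x).flatten(2).transpose(1, 2)
print(tokens.shape)  # torch.Size([1, 196, 768])
```

The resulting token sequence, plus a class token and position embeddings, is what the Transformer encoder consumes.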
## Citation ```BibTeX @misc{dosovitskiy2020image, title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, year={2020}, eprint={2010.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Vision Transformer Paper: Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale' URL: https://paperswithcode.com/paper/an-image-is-worth-16x16-words-transformers-1 Models: - Name: vit_base_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 67394605056 Parameters: 86570000 File Size: 346292833 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch16_224 LR: 0.0008 Epochs: 90 Dropout: 0.0 Crop Pct: '0.9' Batch Size: 4096 Image Size: '224' Warmup Steps: 10000 Weight Decay: 0.03 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L503 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.78% Top 5 Accuracy: 96.13% - Name: vit_base_patch16_384 In Collection: Vision Transformer Metadata: FLOPs: 49348245504 Parameters: 86860000 File Size: 347460194 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch16_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L522 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.2% Top 5 Accuracy: 97.22% - Name: vit_base_patch32_384 In Collection: Vision Transformer Metadata: FLOPs: 12656142336 Parameters: 88300000 File Size: 353210979 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch32_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L532 Weights: 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.66% Top 5 Accuracy: 96.13% - Name: vit_base_resnet50_384 In Collection: Vision Transformer Metadata: FLOPs: 49461491712 Parameters: 98950000 File Size: 395854632 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_resnet50_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L653 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.99% Top 5 Accuracy: 97.3% - Name: vit_large_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 119294746624 Parameters: 304330000 File Size: 1217350532 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_large_patch16_224 Crop Pct: '0.9' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L542 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.06% Top 5 Accuracy: 96.44% - Name: vit_large_patch16_384 In Collection: Vision Transformer Metadata: FLOPs: 174702764032 Parameters: 304720000 File Size: 1218907013 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_large_patch16_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L561 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.17% Top 5 Accuracy: 97.36% - Name: vit_small_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 28236450816 Parameters: 48750000 File Size: 195031454 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification 
Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_small_patch16_224 Crop Pct: '0.9' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L490 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.85% Top 5 Accuracy: 93.42% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/regnetx.md
# RegNetX **RegNetX** is a convolutional network design space of simple, regular models parameterised by a depth $d$, an initial width $w\_{0} > 0$, and a slope $w\_{a} > 0$; these generate a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): $$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ For **RegNetX** we have the additional restrictions that $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('regnetx_002', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `regnetx_002`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('regnetx_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
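To see the parameterisation above in action, the short sketch below computes the continuous per-block widths $u\_{j}$ and then snaps them to quantised widths, roughly following the rounding scheme described in the paper (powers of $w\_{m}$, rounded to multiples of 8). The specific values of $d$, $w\_{0}$, $w\_{a}$ and $w\_{m}$ are illustrative, not a published RegNetX configuration.

```python
import math

# Illustrative design-space parameters (not a specific published RegNetX model).
d, w0, wa, wm = 16, 48, 36.0, 2.5  # depth, initial width, slope, width multiplier

u = [w0 + wa * j for j in range(d)]                  # continuous widths: u_j = w0 + wa * j
s = [round(math.log(uj / w0, wm)) for uj in u]       # snap each width to a power of wm
w = [int(round(w0 * wm ** sj / 8) * 8) for sj in s]  # quantised widths, multiples of 8

print(u[:4])           # [48.0, 84.0, 120.0, 156.0]
print(sorted(set(w)))  # the handful of distinct stage widths the network actually uses
```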
## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetX Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnetx_002 In Collection: RegNetX Metadata: FLOPs: 255276032 Parameters: 2680000 File Size: 10862199 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L337 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 68.75% Top 5 Accuracy: 88.56% - Name: regnetx_004 In Collection: RegNetX Metadata: FLOPs: 510619136 Parameters: 5160000 File Size: 20841309 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L343 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.39% Top 5 Accuracy: 90.82% - Name: regnetx_006 In Collection: RegNetX Metadata: FLOPs: 771659136 Parameters: 6200000 File Size: 24965172 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.84% Top 5 Accuracy: 91.68% - Name: regnetx_008 In Collection: RegNetX Metadata: FLOPs: 1027038208 Parameters: 7260000 File Size: 29235944 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 
GPUs ID: regnetx_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L355 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.05% Top 5 Accuracy: 92.34% - Name: regnetx_016 In Collection: RegNetX Metadata: FLOPs: 2059337856 Parameters: 9190000 File Size: 36988158 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L361 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.95% Top 5 Accuracy: 93.43% - Name: regnetx_032 In Collection: RegNetX Metadata: FLOPs: 4082555904 Parameters: 15300000 File Size: 61509573 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L367 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.15% Top 5 Accuracy: 94.09% - Name: regnetx_040 In Collection: RegNetX Metadata: FLOPs: 5095167744 Parameters: 22120000 File Size: 88844824 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_040 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L373 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.48% Top 5 Accuracy: 94.25% - Name: regnetx_064 In Collection: RegNetX Metadata: FLOPs: 8303405824 Parameters: 26210000 File Size: 105184854 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - 
ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L379 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.06% Top 5 Accuracy: 94.47% - Name: regnetx_080 In Collection: RegNetX Metadata: FLOPs: 10276726784 Parameters: 39570000 File Size: 158720042 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L385 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.21% Top 5 Accuracy: 94.55% - Name: regnetx_120 In Collection: RegNetX Metadata: FLOPs: 15536378368 Parameters: 46110000 File Size: 184866342 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L391 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.61% Top 5 Accuracy: 94.73% - Name: regnetx_160 In Collection: RegNetX Metadata: FLOPs: 20491740672 Parameters: 54280000 File Size: 217623862 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L397 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.84% Top 5 Accuracy: 94.82% - Name: regnetx_320 In Collection: RegNetX Metadata: FLOPs: 40798958592 Parameters: 107810000 File Size: 431962133 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - 
SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.25% Top 5 Accuracy: 95.03% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/csp-darknet.md
# CSP-DarkNet **CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy that partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split-and-merge strategy allows for more gradient flow through the network. This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('cspdarknet53', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
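In addition to classification outputs, the backbone's intermediate feature maps can be pulled out directly. Below is a minimal sketch using `timm`'s `features_only` option; the number of stages and the printed shapes depend on the chosen variant and input size:

```python
import timm
import torch

# Build the backbone so it returns a list of intermediate feature maps
# instead of classification logits.
model = timm.create_model('cspdarknet53', pretrained=True, features_only=True)
model.eval()

with torch.no_grad():
    features = model(torch.randn(1, 3, 256, 256))

# One tensor per feature stage, ordered from high to low resolution.
for i, f in enumerate(features):
    print(i, f.shape)
```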
## Citation ```BibTeX @misc{bochkovskiy2020yolov4, title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, year={2020}, eprint={2004.10934}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP DarkNet Paper: Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection' URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object Models: - Name: cspdarknet53 In Collection: CSP DarkNet Metadata: FLOPs: 8545018880 Parameters: 27640000 File Size: 110775135 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Mish - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - CutMix - Label Smoothing - Mosaic - Polynomial Learning Rate Decay - SGD with Momentum - Self-Adversarial Training - Weight Decay Training Data: - ImageNet Training Resources: 1x NVIDIA RTX 2070 GPU ID: cspdarknet53 LR: 0.1 Layers: 53 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Warmup Steps: 1000 Weight Decay: 0.0005 Interpolation: bilinear Training Steps: 8000000 FPS (GPU RTX 2070): 66 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.05% Top 5 Accuracy: 95.09% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/tf-efficientnet.md
# (Tensorflow) EfficientNet **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice, which arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. For the B0 baseline the paper finds $\alpha = 1.2$, $\beta = 1.1$, $\gamma = 1.15$, chosen so that $\alpha \cdot \beta^2 \cdot \gamma^2 \approx 2$, i.e. each unit increase of the compound coefficient roughly doubles the FLOPs (a small numeric sketch of this rule appears at the end of this page). EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('tf_efficientnet_b0', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: TF EfficientNet Paper: Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for Models: - Name: tf_efficientnet_b0 In Collection: TF EfficientNet Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21383997 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_b0 LR: 0.256 Epochs: 350 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1241 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.85% Top 5 Accuracy: 93.23% - Name: tf_efficientnet_b1 In Collection: TF EfficientNet Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31512534 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b1 LR: 0.256 Epochs: 350 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1251 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.84% Top 5 Accuracy: 94.2% - Name: tf_efficientnet_b2 In Collection: TF EfficientNet Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36797929 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training 
Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b2 LR: 0.256 Epochs: 350 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1261 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.07% Top 5 Accuracy: 94.9% - Name: tf_efficientnet_b3 In Collection: TF EfficientNet Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49381362 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b3 LR: 0.256 Epochs: 350 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1271 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.65% Top 5 Accuracy: 95.72% - Name: tf_efficientnet_b4 In Collection: TF EfficientNet Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77989689 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_b4 LR: 0.256 Epochs: 350 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1281 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.03% Top 5 Accuracy: 96.3% - Name: tf_efficientnet_b5 In Collection: TF EfficientNet Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122403150 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b5 LR: 0.256 Epochs: 350 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: 
https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1291 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.81% Top 5 Accuracy: 96.75% - Name: tf_efficientnet_b6 In Collection: TF EfficientNet Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173232007 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b6 LR: 0.256 Epochs: 350 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1301 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.11% Top 5 Accuracy: 96.89% - Name: tf_efficientnet_b7 In Collection: TF EfficientNet Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266850607 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b7 LR: 0.256 Epochs: 350 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1312 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.93% Top 5 Accuracy: 97.2% - Name: tf_efficientnet_b8 In Collection: TF EfficientNet Metadata: FLOPs: 80962956270 Parameters: 87410000 File Size: 351379853 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b8 LR: 0.256 Epochs: 350 Crop Pct: '0.954' Momentum: 0.9 Batch Size: 2048 Image Size: '672' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1323 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.35% Top 5 Accuracy: 97.39% - Name: 
tf_efficientnet_el In Collection: TF EfficientNet Metadata: FLOPs: 9356616096 Parameters: 10590000 File Size: 42800271 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_el Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1551 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.45% Top 5 Accuracy: 95.17% - Name: tf_efficientnet_em In Collection: TF EfficientNet Metadata: FLOPs: 3636607040 Parameters: 6900000 File Size: 27933644 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_em Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1541 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.71% Top 5 Accuracy: 94.33% - Name: tf_efficientnet_es In Collection: TF EfficientNet Metadata: FLOPs: 2057577472 Parameters: 5440000 File Size: 22008479 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_es Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1531 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.28% Top 5 Accuracy: 93.6% - Name: tf_efficientnet_l2_ns_475 In Collection: TF EfficientNet Metadata: FLOPs: 217795669644 Parameters: 480310000 File Size: 1925950424 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_l2_ns_475 LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.936' Momentum: 0.9 Batch Size: 2048 Image Size: '475' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1509 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth Results: - Task: Image Classification 
Dataset: ImageNet Metrics: Top 1 Accuracy: 88.24% Top 5 Accuracy: 98.55% -->
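As promised above, here is a tiny numeric sketch of the compound scaling rule. It is illustrative only: the coefficients are the ones reported for the B0 baseline in the paper, but the rounding below is a simplification rather than the exact procedure used to derive the released B1-B7 models.

```python
# Illustrative only: compound scaling with the coefficients reported in the
# EfficientNet paper (alpha scales depth, beta width, gamma resolution),
# chosen so that alpha * beta**2 * gamma**2 ~= 2.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15

def scale(phi: int, base_resolution: int = 224):
    """Return (depth multiplier, width multiplier, input resolution) for phi."""
    depth = ALPHA ** phi
    width = BETA ** phi
    resolution = int(round(base_resolution * GAMMA ** phi))
    return depth, width, resolution

for phi in range(4):
    d, w, r = scale(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution ~{r}px")
```

Each unit increase of $\phi$ therefore costs roughly $\alpha \cdot \beta^2 \cdot \gamma^2 \approx 2\times$ the FLOPs, which matches the FLOP budget used in the paper's grid search.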
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/hrnet.md
# HRNet **HRNet**, or **High-Resolution Net**, is a general-purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It is able to maintain high-resolution representations through the whole process. The network starts from a high-resolution convolution stream, gradually adds high-to-low resolution convolution streams one by one, and connects the multi-resolution streams in parallel. The resulting network consists of several ($4$ in the paper) stages, and the $n$th stage contains $n$ streams corresponding to $n$ resolutions. The authors conduct repeated multi-resolution fusions by repeatedly exchanging information across the parallel streams. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('hrnet_w18', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `hrnet_w18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
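The HRNet family ships in several widths (`w18` through `w64`, plus small variants). A quick way to enumerate the variants registered in your installed `timm` version is sketched below; the exact list depends on the version:

```python
import timm

# All HRNet variants known to this timm install.
print(timm.list_models('hrnet*'))

# Only the variants that have pretrained weights available.
print(timm.list_models('hrnet*', pretrained=True))
```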
## Citation ```BibTeX @misc{sun2019highresolution, title={High-Resolution Representations for Labeling Pixels and Regions}, author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, year={2019}, eprint={1904.04514}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: HRNet Paper: Title: Deep High-Resolution Representation Learning for Visual Recognition URL: https://paperswithcode.com/paper/190807919 Models: - Name: hrnet_w18 In Collection: HRNet Metadata: FLOPs: 5547205500 Parameters: 21300000 File Size: 85718883 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L800 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.76% Top 5 Accuracy: 93.44% - Name: hrnet_w18_small In Collection: HRNet Metadata: FLOPs: 2071651488 Parameters: 13190000 File Size: 52934302 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L790 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.34% Top 5 Accuracy: 90.68% - Name: hrnet_w18_small_v2 In Collection: HRNet Metadata: FLOPs: 3360023160 Parameters: 15600000 File Size: 62682879 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small_v2 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L795 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.41% - Name: hrnet_w30 In Collection: HRNet Metadata: FLOPs: 10474119492 Parameters: 37710000 File Size: 151452218 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w30 Epochs: 100 Layers: 30 Crop Pct: '0.875' 
Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L805 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.21% Top 5 Accuracy: 94.22% - Name: hrnet_w32 In Collection: HRNet Metadata: FLOPs: 11524528320 Parameters: 41230000 File Size: 165547812 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 60 hours ID: hrnet_w32 Epochs: 100 Layers: 32 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L810 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.45% Top 5 Accuracy: 94.19% - Name: hrnet_w40 In Collection: HRNet Metadata: FLOPs: 16381182192 Parameters: 57560000 File Size: 230899236 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w40 Epochs: 100 Layers: 40 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L815 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.93% Top 5 Accuracy: 94.48% - Name: hrnet_w44 In Collection: HRNet Metadata: FLOPs: 19202520264 Parameters: 67060000 File Size: 268957432 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w44 Epochs: 100 Layers: 44 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L820 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.89% Top 5 Accuracy: 94.37% - Name: hrnet_w48 In Collection: HRNet Metadata: FLOPs: 22285865760 Parameters: 77470000 File Size: 310603710 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 80 hours ID: hrnet_w48 Epochs: 100 Layers: 48 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: 
https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L825 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.32% Top 5 Accuracy: 94.51% - Name: hrnet_w64 In Collection: HRNet Metadata: FLOPs: 37239321984 Parameters: 128060000 File Size: 513071818 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w64 Epochs: 100 Layers: 64 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L830 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.46% Top 5 Accuracy: 94.65% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/gloun-xception.md
# (Gluon) Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('gluon_xception65', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `gluon_xception65`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('gluon_xception65', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
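Note that this model expects larger inputs than most ImageNet backbones (299x299 with a 0.903 crop fraction, per the metadata below). A quick sanity check of the preprocessing that `resolve_data_config` picked up:

```python
import timm
from timm.data import resolve_data_config

model = timm.create_model('gluon_xception65', pretrained=True)
config = resolve_data_config({}, model=model)

# Expect something like input_size=(3, 299, 299), interpolation='bicubic'
# and crop_pct=0.903 for this variant.
print(config['input_size'], config['interpolation'], config['crop_pct'])
```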
## Citation ```BibTeX @misc{chollet2017xception, title={Xception: Deep Learning with Depthwise Separable Convolutions}, author={François Chollet}, year={2017}, eprint={1610.02357}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Gloun Xception Paper: Title: 'Xception: Deep Learning with Depthwise Separable Convolutions' URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise Models: - Name: gluon_xception65 In Collection: Gloun Xception Metadata: FLOPs: 17594889728 Parameters: 39920000 File Size: 160551306 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Depthwise Separable Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_xception65 Crop Pct: '0.903' Image Size: '299' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_xception.py#L241 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.7% Top 5 Accuracy: 94.87% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/tf-mobilenet-v3.md
# (Tensorflow) MobileNet v3 **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-02244, author = {Andrew Howard and Mark Sandler and Grace Chu and Liang{-}Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. 
Le and Hartwig Adam}, title = {Searching for MobileNetV3}, journal = {CoRR}, volume = {abs/1905.02244}, year = {2019}, url = {http://arxiv.org/abs/1905.02244}, archivePrefix = {arXiv}, eprint = {1905.02244}, timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: TF MobileNet V3 Paper: Title: Searching for MobileNetV3 URL: https://paperswithcode.com/paper/searching-for-mobilenetv3 Models: - Name: tf_mobilenetv3_large_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 194323712 Parameters: 3990000 File Size: 16097377 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_075 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.45% Top 5 Accuracy: 91.34% - Name: tf_mobilenetv3_large_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 274535288 Parameters: 5480000 File Size: 22076649 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.51% Top 5 Accuracy: 92.61% - Name: tf_mobilenetv3_large_minimal_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 267216928 Parameters: 3920000 File Size: 15836368 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_minimal_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412 Weights: 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.24% Top 5 Accuracy: 90.64% - Name: tf_mobilenetv3_small_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 48457664 Parameters: 2040000 File Size: 8242701 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_075 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 65.72% Top 5 Accuracy: 86.13% - Name: tf_mobilenetv3_small_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 65450600 Parameters: 2540000 File Size: 10256398 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 67.92% Top 5 Accuracy: 87.68% - Name: tf_mobilenetv3_small_minimal_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 60827936 Parameters: 2040000 File Size: 8258083 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_minimal_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 62.91% Top 5 Accuracy: 84.24% -->
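As a quick sanity check against the parameter counts listed in the metadata above, you can count a model's parameters directly; this minimal sketch should land close to the rounded ~3.99M figure given for `tf_mobilenetv3_large_075`:

```python
import timm

model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)

# Total trainable parameters; compare against the Parameters field above.
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"{n_params / 1e6:.2f}M parameters")
```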
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/ssl-resnext.md
# SSL ResNeXT A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The models in this collection utilise semi-supervised learning to improve their performance. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on these weights: non-commercial use only. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('ssl_resnext101_32x16d', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `ssl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('ssl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I.
Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SSL ResNext Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: ssl_resnext101_32x16d In Collection: SSL ResNext Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x16d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L944 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.84% Top 5 Accuracy: 96.09% - Name: ssl_resnext101_32x4d In Collection: SSL ResNext Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177341913 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x4d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L924 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.91% Top 5 Accuracy: 95.73% - Name: ssl_resnext101_32x8d In Collection: SSL ResNext Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x8d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L934 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth 
Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.61% Top 5 Accuracy: 96.04% - Name: ssl_resnext50_32x4d In Collection: SSL ResNext Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100428550 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext50_32x4d LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L914 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.3% Top 5 Accuracy: 95.41% -->
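If you want pooled embeddings rather than class logits (for example, to train your own head on top of the semi-supervised features), `timm` can create the model without its classifier. A minimal sketch:

```python
import timm
import torch

# num_classes=0 removes the classification head; the forward pass then
# returns globally pooled features.
model = timm.create_model('ssl_resnext101_32x16d', pretrained=True, num_classes=0)
model.eval()

with torch.no_grad():
    embedding = model(torch.randn(1, 3, 224, 224))

print(embedding.shape)  # torch.Size([1, 2048]) for this backbone
```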
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/swsl-resnet.md
# SWSL ResNet **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks. A minimal sketch of a residual block is included at the end of this page. The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on these weights: non-commercial use only. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('swsl_resnet18', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `swsl_resnet18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('swsl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I.
Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SWSL ResNet Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: swsl_resnet18 In Collection: SWSL ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46811375 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnet18 LR: 0.0015 Epochs: 30 Layers: 18 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L954 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.28% Top 5 Accuracy: 91.76% - Name: swsl_resnet50 In Collection: SWSL ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102480594 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnet50 LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L965 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.14% Top 5 Accuracy: 95.97% -->
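As promised in the introduction, here is a minimal sketch of the residual block idea: the block learns a residual function `F(x)` and adds it back to its input via a skip connection. This is an illustrative PyTorch module under simplified assumptions (fixed channel count, no downsampling), not the exact block used by `swsl_resnet18`:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicResidualBlock(nn.Module):
    """Minimal residual block: output = ReLU(F(x) + x)."""

    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        # F(x): two conv-bn stages with a ReLU in between
        residual = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        # skip connection adds the input back, so the block only has to learn the residual
        return F.relu(residual + x)


x = torch.randn(1, 64, 56, 56)
print(BasicResidualBlock(64)(x).shape)  # torch.Size([1, 64, 56, 56])
```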
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/densenet.md
# DenseNet **DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers. A minimal sketch of a dense block is included at the end of this page. The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('densenet121', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `densenet121`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('densenet121', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/HuangLW16a, author = {Gao Huang and Zhuang Liu and Kilian Q.
Weinberger}, title = {Densely Connected Convolutional Networks}, journal = {CoRR}, volume = {abs/1608.06993}, year = {2016}, url = {http://arxiv.org/abs/1608.06993}, archivePrefix = {arXiv}, eprint = {1608.06993}, timestamp = {Mon, 10 Sep 2018 15:49:32 +0200}, biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` ``` @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} } ``` <!-- Type: model-index Collections: - Name: DenseNet Paper: Title: Densely Connected Convolutional Networks URL: https://paperswithcode.com/paper/densely-connected-convolutional-networks Models: - Name: densenet121 In Collection: DenseNet Metadata: FLOPs: 3641843200 Parameters: 7980000 File Size: 32376726 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet121 LR: 0.1 Epochs: 90 Layers: 121 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L295 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.56% Top 5 Accuracy: 92.65% - Name: densenet161 In Collection: DenseNet Metadata: FLOPs: 9931959264 Parameters: 28680000 File Size: 115730790 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet161 LR: 0.1 Epochs: 90 Layers: 161 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L347 Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.36% Top 5 Accuracy: 93.63% - Name: densenet169 In Collection: DenseNet Metadata: FLOPs: 4316945792 Parameters: 14150000 File Size: 57365526 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet169 LR: 0.1 Epochs: 90 Layers: 169 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L327 Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth Results: - Task: Image Classification Dataset: 
ImageNet Metrics: Top 1 Accuracy: 75.9% Top 5 Accuracy: 93.02% - Name: densenet201 In Collection: DenseNet Metadata: FLOPs: 5514321024 Parameters: 20010000 File Size: 81131730 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet201 LR: 0.1 Epochs: 90 Layers: 201 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L337 Weights: https://download.pytorch.org/models/densenet201-c1103571.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.29% Top 5 Accuracy: 93.48% - Name: densenetblur121d In Collection: DenseNet Metadata: FLOPs: 3947812864 Parameters: 8000000 File Size: 32456500 Architecture: - 1x1 Convolution - Batch Normalization - Blur Pooling - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: densenetblur121d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L305 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.59% Top 5 Accuracy: 93.2% - Name: tv_densenet121 In Collection: DenseNet Metadata: FLOPs: 3641843200 Parameters: 7980000 File Size: 32342954 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_densenet121 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L379 Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.74% Top 5 Accuracy: 92.15% -->
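As promised in the introduction, here is a minimal sketch of dense connectivity: each layer receives the concatenation of all preceding feature-maps and contributes `growth_rate` new channels. This is an illustrative module, not timm's DenseNet implementation (which adds bottleneck layers and transition blocks):

```python
import torch
import torch.nn as nn


class TinyDenseBlock(nn.Module):
    """Each layer sees the concatenation of all preceding feature-maps."""

    def __init__(self, in_channels, growth_rate, num_layers):
        super().__init__()
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            # layer i receives in_channels + i * growth_rate channels
            self.layers.append(nn.Sequential(
                nn.BatchNorm2d(in_channels + i * growth_rate),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels + i * growth_rate, growth_rate, 3, padding=1, bias=False),
            ))

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            new_features = layer(torch.cat(features, dim=1))  # dense connection
            features.append(new_features)
        return torch.cat(features, dim=1)


x = torch.randn(1, 64, 28, 28)
# 64 input channels + 4 layers * 32 growth = 192 output channels
print(TinyDenseBlock(64, growth_rate=32, num_layers=4)(x).shape)  # [1, 192, 28, 28]
```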
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/legacy-senet.md
# (Legacy) SENet A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. The weights from this model were ported from Gluon. ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('legacy_senet154', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `legacy_senet154`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('legacy_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
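For intuition, the squeeze-and-excitation mechanism described in the introduction can be sketched in a few lines of PyTorch: global-average-pool each channel (squeeze), pass through a small bottleneck (excite), and rescale the channels by the resulting gates. This is an illustrative version, not the exact module used inside `legacy_senet154`:

```python
import torch
import torch.nn as nn


class SqueezeExcite(nn.Module):
    """Squeeze (global pooling) then excite (channel-wise gating)."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)

    def forward(self, x):
        scale = x.mean(dim=(2, 3), keepdim=True)   # squeeze: B x C x 1 x 1
        scale = torch.relu(self.fc1(scale))        # bottleneck reduction
        scale = torch.sigmoid(self.fc2(scale))     # excite: per-channel gates in (0, 1)
        return x * scale                           # dynamic channel-wise recalibration


x = torch.randn(2, 256, 14, 14)
print(SqueezeExcite(256)(x).shape)  # torch.Size([2, 256, 14, 14])
```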
## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Legacy SENet Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: legacy_senet154 In Collection: Legacy SENet Metadata: FLOPs: 26659556016 Parameters: 115090000 File Size: 461488402 Architecture: - Convolution - Dense Connections - Global Average Pooling - Max Pooling - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_senet154 LR: 0.6 Epochs: 100 Layers: 154 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L440 Weights: http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.33% Top 5 Accuracy: 95.51% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/mobilenet-v3.md
# MobileNet v3 **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('mobilenetv3_large_100', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `mobilenetv3_large_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-02244, author = {Andrew Howard and Mark Sandler and Grace Chu and Liang{-}Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. 
Le and Hartwig Adam}, title = {Searching for MobileNetV3}, journal = {CoRR}, volume = {abs/1905.02244}, year = {2019}, url = {http://arxiv.org/abs/1905.02244}, archivePrefix = {arXiv}, eprint = {1905.02244}, timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: MobileNet V3 Paper: Title: Searching for MobileNetV3 URL: https://paperswithcode.com/paper/searching-for-mobilenetv3 Models: - Name: mobilenetv3_large_100 In Collection: MobileNet V3 Metadata: FLOPs: 287193752 Parameters: 5480000 File Size: 22076443 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: mobilenetv3_large_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L363 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.77% Top 5 Accuracy: 92.54% - Name: mobilenetv3_rw In Collection: MobileNet V3 Metadata: FLOPs: 287190638 Parameters: 5480000 File Size: 22064048 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: mobilenetv3_rw LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L384 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.62% Top 5 Accuracy: 92.71% -->
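Beyond classification, timm models such as MobileNetV3 can also be created as feature backbones with `features_only=True`, which makes the model return intermediate feature maps instead of logits. A short sketch (the number of stages and their channel counts vary by model):

```python
import timm
import torch

# Create MobileNetV3 as a feature backbone rather than a classifier.
backbone = timm.create_model('mobilenetv3_large_100', pretrained=True, features_only=True)

x = torch.randn(1, 3, 224, 224)
features = backbone(x)
for f in features:
    print(f.shape)  # one tensor per feature stage, at decreasing spatial resolution
```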
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/efficientnet.md
# EfficientNet **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. A worked example of this scaling rule is included at the end of this page. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('efficientnet_b0', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: EfficientNet Paper: Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for Models: - Name: efficientnet_b0 In Collection: EfficientNet Metadata: FLOPs: 511241564 Parameters: 5290000 File Size: 21376743 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b0 Layers: 18 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1002 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.71% Top 5 Accuracy: 93.52% - Name: efficientnet_b1 In Collection: EfficientNet Metadata: FLOPs: 909691920 Parameters: 7790000 File Size: 31502706 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b1 Crop Pct: '0.875' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1011 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.71% Top 5 Accuracy: 94.15% - Name: efficientnet_b2 In Collection: EfficientNet Metadata: FLOPs: 1265324514 Parameters: 9110000 File Size: 36788104 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2 Crop Pct: '0.875' Image Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1020 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.38% Top 5 Accuracy: 95.08% - Name: efficientnet_b2a In Collection: EfficientNet Metadata: FLOPs: 1452041554 Parameters: 9110000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch 
Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2a Crop Pct: '1.0' Image Size: '288' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1029 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.61% Top 5 Accuracy: 95.32% - Name: efficientnet_b3 In Collection: EfficientNet Metadata: FLOPs: 2327905920 Parameters: 12230000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3 Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1038 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.08% Top 5 Accuracy: 96.03% - Name: efficientnet_b3a In Collection: EfficientNet Metadata: FLOPs: 2600628304 Parameters: 12230000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3a Crop Pct: '1.0' Image Size: '320' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1047 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.25% Top 5 Accuracy: 96.11% - Name: efficientnet_em In Collection: EfficientNet Metadata: FLOPs: 3935516480 Parameters: 6900000 File Size: 27927309 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_em Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1118 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.26% Top 5 Accuracy: 94.79% - Name: efficientnet_es In Collection: EfficientNet Metadata: FLOPs: 2317181824 Parameters: 5440000 File Size: 22003339 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_es Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: 
https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1110 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.09% Top 5 Accuracy: 93.93% - Name: efficientnet_lite0 In Collection: EfficientNet Metadata: FLOPs: 510605024 Parameters: 4650000 File Size: 18820005 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_lite0 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1163 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.5% Top 5 Accuracy: 92.51% -->
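To make the compound scaling rule concrete, the sketch below computes the depth/width/resolution multipliers for a given $\phi$, using the $\alpha = 1.2$, $\beta = 1.1$, $\gamma = 1.15$ coefficients reported in the EfficientNet paper. The printed multipliers are illustrative only; the released models additionally round channel counts and layer repeats:

```python
# Compound scaling: depth ~ alpha**phi, width ~ beta**phi, resolution ~ gamma**phi,
# with alpha * beta**2 * gamma**2 ~= 2, so FLOPs grow by roughly 2**phi.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15


def compound_scale(phi):
    """Return (depth, width, resolution) multipliers for compound coefficient phi."""
    return ALPHA ** phi, BETA ** phi, GAMMA ** phi


for phi in range(4):  # roughly B0..B3
    d, w, r = compound_scale(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")
```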
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/tresnet.md
# TResNet A **TResNet** is a variant of a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. TResNet models contain several design tricks, including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('tresnet_l', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
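As a concrete illustration of the SpaceToDepth stem mentioned above: instead of aggressive strided convolutions and pooling, the stem rearranges spatial blocks into channels. PyTorch's `pixel_unshuffle` (available from PyTorch 1.8) performs exactly this rearrangement; the snippet is an illustrative sketch, not TResNet's exact stem:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 224, 224)
# SpaceToDepth: move each 4x4 spatial block into the channel dimension.
y = F.pixel_unshuffle(x, downscale_factor=4)
print(y.shape)  # torch.Size([1, 48, 56, 56]) -- 3 * 4 * 4 = 48 channels
```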
## Citation ```BibTeX @misc{ridnik2020tresnet, title={TResNet: High Performance GPU-Dedicated Architecture}, author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, year={2020}, eprint={2003.13630}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TResNet Paper: Title: 'TResNet: High Performance GPU-Dedicated Architecture' URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated Models: - Name: tresnet_l In Collection: TResNet Metadata: FLOPs: 10873416792 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.49% Top 5 Accuracy: 95.62% - Name: tresnet_l_448 In Collection: TResNet Metadata: FLOPs: 43488238584 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.26% Top 5 Accuracy: 95.98% - Name: tresnet_m In Collection: TResNet Metadata: FLOPs: 5733048064 Parameters: 41282200 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs Training Time: < 24 hours ID: tresnet_m LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.8% Top 5 Accuracy: 94.86% - Name: tresnet_m_448 In Collection: TResNet Metadata: FLOPs: 22929743104 
Parameters: 29278464 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_m_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.72% Top 5 Accuracy: 95.57% - Name: tresnet_xl In Collection: TResNet Metadata: FLOPs: 15162534034 Parameters: 75646610 File Size: 314378965 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.05% Top 5 Accuracy: 95.93% - Name: tresnet_xl_448 In Collection: TResNet Metadata: FLOPs: 60641712730 Parameters: 75646610 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.06% Top 5 Accuracy: 96.19% -->
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/tf-mixnet.md
# (Tensorflow) MixNet **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('tf_mixnet_l', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mixconv, title={MixConv: Mixed Depthwise Convolutional Kernels}, author={Mingxing Tan and Quoc V. 
Le}, year={2019}, eprint={1907.09595}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TF MixNet Paper: Title: 'MixConv: Mixed Depthwise Convolutional Kernels' URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels Models: - Name: tf_mixnet_l In Collection: TF MixNet Metadata: FLOPs: 688674516 Parameters: 7330000 File Size: 29620756 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_l Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1720 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.78% Top 5 Accuracy: 94.0% - Name: tf_mixnet_m In Collection: TF MixNet Metadata: FLOPs: 416633502 Parameters: 5010000 File Size: 20310871 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_m Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1709 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.96% Top 5 Accuracy: 93.16% - Name: tf_mixnet_s In Collection: TF MixNet Metadata: FLOPs: 302587678 Parameters: 4130000 File Size: 16738218 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1698 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.68% Top 5 Accuracy: 92.64% -->
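For intuition about the MixConv operation described above: the channels are split into groups, and each group gets a depthwise convolution with a different kernel size, so one layer mixes several receptive-field sizes. A minimal illustrative sketch, not the exact timm implementation:

```python
import torch
import torch.nn as nn


class MixConv(nn.Module):
    """Depthwise convolutions with mixed kernel sizes over channel splits."""

    def __init__(self, channels, kernel_sizes=(3, 5, 7)):
        super().__init__()
        assert channels % len(kernel_sizes) == 0
        split = channels // len(kernel_sizes)
        # one depthwise conv (groups == channels) per kernel size
        self.convs = nn.ModuleList(
            nn.Conv2d(split, split, k, padding=k // 2, groups=split, bias=False)
            for k in kernel_sizes
        )
        self.split = split

    def forward(self, x):
        chunks = torch.split(x, self.split, dim=1)
        return torch.cat([conv(c) for conv, c in zip(self.convs, chunks)], dim=1)


x = torch.randn(1, 96, 28, 28)
print(MixConv(96)(x).shape)  # torch.Size([1, 96, 28, 28])
```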
0
hf_public_repos/pytorch-image-models/docs
hf_public_repos/pytorch-image-models/docs/models/gloun-resnext.md
# (Gluon) ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('gluon_resnext101_32x4d', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `gluon_resnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('gluon_resnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/XieGDTH16, author = {Saining Xie and Ross B. 
Girshick and Piotr Doll{\'{a}}r and Zhuowen Tu and Kaiming He}, title = {Aggregated Residual Transformations for Deep Neural Networks}, journal = {CoRR}, volume = {abs/1611.05431}, year = {2016}, url = {http://arxiv.org/abs/1611.05431}, archivePrefix = {arXiv}, eprint = {1611.05431}, timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Gloun ResNeXt Paper: Title: Aggregated Residual Transformations for Deep Neural Networks URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep Models: - Name: gluon_resnext101_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177367414 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L193 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.33% Top 5 Accuracy: 94.91% - Name: gluon_resnext101_64x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 19954172928 Parameters: 83460000 File Size: 334737852 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_64x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L201 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.63% Top 5 Accuracy: 95.0% - Name: gluon_resnext50_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100441719 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext50_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L185 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.35% Top 5 Accuracy: 94.42% -->
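The aggregated transformations described in the introduction are implemented efficiently as a grouped convolution, where the number of groups equals the cardinality $C$. The short comparison below shows the parameter saving relative to a regular convolution of the same shape (illustrative numbers for a 128-channel layer):

```python
import torch
import torch.nn as nn

# 3x3 convolution with 32 groups = 32 parallel transformations (cardinality C=32).
grouped = nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=32, bias=False)
regular = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False)

x = torch.randn(1, 128, 56, 56)
print(grouped(x).shape)                              # same output shape as regular conv
print(sum(p.numel() for p in grouped.parameters()))  # 4608: each group sees 128/32=4 channels
print(sum(p.numel() for p in regular.parameters()))  # 147456: 32x more weights
```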
# SE-ResNeXt

**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resneXt) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('seresnext26d_32x4d', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `seresnext26d_32x4d`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('seresnext26d_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
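To make the channel-wise recalibration concrete, here is a minimal sketch of a squeeze-and-excitation block. It is simplified for illustration and is not timm's exact implementation; the class name is ours:

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Illustrative squeeze-and-excitation block (simplified)."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)

    def forward(self, x):
        # squeeze: global spatial average -> one descriptor per channel
        s = x.mean(dim=(2, 3), keepdim=True)
        # excitation: bottleneck MLP + sigmoid produces per-channel gates in (0, 1)
        s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))
        # recalibrate: rescale each channel of the feature map
        return x * s

feat = torch.randn(1, 64, 56, 56)
print(SEBlock(64)(feat).shape)  # torch.Size([1, 64, 56, 56])
```

The gates are computed from global context, so each channel is amplified or suppressed depending on the whole image, which is what "dynamic channel-wise feature recalibration" refers to.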
## Citation

```BibTeX
@misc{hu2019squeezeandexcitation,
    title={Squeeze-and-Excitation Networks},
    author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
    year={2019},
    eprint={1709.01507},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: SEResNeXt
  Paper:
    Title: Squeeze-and-Excitation Networks
    URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: seresnext26d_32x4d
  In Collection: SEResNeXt
  Metadata:
    FLOPs: 3507053024
    Parameters: 16810000
    File Size: 67425193
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: seresnext26d_32x4d
    LR: 0.6
    Epochs: 100
    Layers: 26
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1234
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.59%
      Top 5 Accuracy: 93.61%
- Name: seresnext26t_32x4d
  In Collection: SEResNeXt
  Metadata:
    FLOPs: 3466436448
    Parameters: 16820000
    File Size: 67414838
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: seresnext26t_32x4d
    LR: 0.6
    Epochs: 100
    Layers: 26
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1246
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.99%
      Top 5 Accuracy: 93.73%
- Name: seresnext50_32x4d
  In Collection: SEResNeXt
  Metadata:
    FLOPs: 5475179184
    Parameters: 27560000
    File Size: 110569859
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: seresnext50_32x4d
    LR: 0.6
    Epochs: 100
    Layers: 50
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1267
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 81.27%
      Top 5 Accuracy: 95.62%
-->
# SWSL ResNeXt

A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width.

The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification.

Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('swsl_resnext101_32x16d', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
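If you are unsure which variants exist, timm's model registry can be queried with a wildcard. A quick sketch; the printed list is indicative and depends on your timm version:

```python
import timm

# List SWSL ResNeXt variants known to timm, restricted to those with pretrained weights
print(timm.list_models('swsl_resnext*', pretrained=True))
# e.g. ['swsl_resnext101_32x16d', 'swsl_resnext101_32x4d', 'swsl_resnext101_32x8d', 'swsl_resnext50_32x4d']
```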
## Citation

```BibTeX
@article{DBLP:journals/corr/abs-1905-00546,
  author    = {I. Zeki Yalniz and
               Herv{\'{e}} J{\'{e}}gou and
               Kan Chen and
               Manohar Paluri and
               Dhruv Mahajan},
  title     = {Billion-scale semi-supervised learning for image classification},
  journal   = {CoRR},
  volume    = {abs/1905.00546},
  year      = {2019},
  url       = {http://arxiv.org/abs/1905.00546},
  archivePrefix = {arXiv},
  eprint    = {1905.00546},
  timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

<!--
Type: model-index
Collections:
- Name: SWSL ResNext
  Paper:
    Title: Billion-scale semi-supervised learning for image classification
    URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
Models:
- Name: swsl_resnext101_32x16d
  In Collection: SWSL ResNext
  Metadata:
    FLOPs: 46623691776
    Parameters: 194030000
    File Size: 777518664
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Techniques:
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - IG-1B-Targeted
    - ImageNet
    Training Resources: 64x GPUs
    ID: swsl_resnext101_32x16d
    LR: 0.0015
    Epochs: 30
    Layers: 101
    Crop Pct: '0.875'
    Batch Size: 1536
    Image Size: '224'
    Weight Decay: 0.0001
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L1009
  Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 83.34%
      Top 5 Accuracy: 96.84%
- Name: swsl_resnext101_32x4d
  In Collection: SWSL ResNext
  Metadata:
    FLOPs: 10298145792
    Parameters: 44180000
    File Size: 177341913
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Techniques:
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - IG-1B-Targeted
    - ImageNet
    Training Resources: 64x GPUs
    ID: swsl_resnext101_32x4d
    LR: 0.0015
    Epochs: 30
    Layers: 101
    Crop Pct: '0.875'
    Batch Size: 1536
    Image Size: '224'
    Weight Decay: 0.0001
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L987
  Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 83.22%
      Top 5 Accuracy: 96.77%
- Name: swsl_resnext101_32x8d
  In Collection: SWSL ResNext
  Metadata:
    FLOPs: 21180417024
    Parameters: 88790000
    File Size: 356056638
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Techniques:
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - IG-1B-Targeted
    - ImageNet
    Training Resources: 64x GPUs
    ID: swsl_resnext101_32x8d
    LR: 0.0015
    Epochs: 30
    Layers: 101
    Crop Pct: '0.875'
    Batch Size: 1536
    Image Size: '224'
    Weight Decay: 0.0001
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L998
  Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 84.27%
      Top 5 Accuracy: 97.17%
- Name: swsl_resnext50_32x4d
  In Collection: SWSL ResNext
  Metadata:
    FLOPs: 5472648192
    Parameters: 25030000
    File Size: 100428550
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Convolution
    - Global Average Pooling
    - Grouped Convolution
    - Max Pooling
    - ReLU
    - ResNeXt Block
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Techniques:
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - IG-1B-Targeted
    - ImageNet
    Training Resources: 64x GPUs
    ID: swsl_resnext50_32x4d
    LR: 0.0015
    Epochs: 30
    Layers: 50
    Crop Pct: '0.875'
    Batch Size: 1536
    Image Size: '224'
    Weight Decay: 0.0001
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L976
  Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 82.17%
      Top 5 Accuracy: 96.23%
-->
# NASNet

**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells.

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('nasnetalarge', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.

## Citation

```BibTeX
@misc{zoph2018learning,
    title={Learning Transferable Architectures for Scalable Image Recognition},
    author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le},
    year={2018},
    eprint={1707.07012},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: NASNet
  Paper:
    Title: Learning Transferable Architectures for Scalable Image Recognition
    URL: https://paperswithcode.com/paper/learning-transferable-architectures-for
Models:
- Name: nasnetalarge
  In Collection: NASNet
  Metadata:
    FLOPs: 30242402862
    Parameters: 88750000
    File Size: 356056626
    Architecture:
    - Average Pooling
    - Batch Normalization
    - Convolution
    - Depthwise Separable Convolution
    - Dropout
    - ReLU
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - RMSProp
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 50x Tesla K40 GPUs
    ID: nasnetalarge
    Dropout: 0.5
    Crop Pct: '0.911'
    Momentum: 0.9
    Image Size: '331'
    Interpolation: bicubic
    Label Smoothing: 0.1
    RMSProp $\epsilon$: 1.0
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562
  Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 82.63%
      Top 5 Accuracy: 96.05%
-->
# SelecSLS

**SelecSLS** uses novel selective long and short range skip connections to improve information flow, allowing for a drastically faster network without compromising accuracy.

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('selecsls42b', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
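For the feature extraction mentioned above, here is a quick sketch using `features_only=True`, which most timm architectures (including SelecSLS, to the best of our knowledge) support; it builds the model as a backbone that returns intermediate feature maps instead of logits:

```python
import torch
import timm

# features_only=True returns a list of feature maps from several network stages
model = timm.create_model('selecsls42b', pretrained=True, features_only=True)
model.eval()
with torch.no_grad():
    features = model(torch.randn(1, 3, 224, 224))
for f in features:
    print(f.shape)  # one (batch, channels, height, width) tensor per stage
```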
## Citation

```BibTeX
@article{Mehta_2020,
    title={XNect},
    volume={39},
    ISSN={1557-7368},
    url={http://dx.doi.org/10.1145/3386569.3392410},
    DOI={10.1145/3386569.3392410},
    number={4},
    journal={ACM Transactions on Graphics},
    publisher={Association for Computing Machinery (ACM)},
    author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
    year={2020},
    month={Jul}
}
```

<!--
Type: model-index
Collections:
- Name: SelecSLS
  Paper:
    Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera'
    URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose
Models:
- Name: selecsls42b
  In Collection: SelecSLS
  Metadata:
    FLOPs: 3824022528
    Parameters: 32460000
    File Size: 129948954
    Architecture:
    - Batch Normalization
    - Convolution
    - Dense Connections
    - Dropout
    - Global Average Pooling
    - ReLU
    - SelecSLS Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Cosine Annealing
    - Random Erasing
    Training Data:
    - ImageNet
    ID: selecsls42b
    Crop Pct: '0.875'
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.18%
      Top 5 Accuracy: 93.39%
- Name: selecsls60
  In Collection: SelecSLS
  Metadata:
    FLOPs: 4610472600
    Parameters: 30670000
    File Size: 122839714
    Architecture:
    - Batch Normalization
    - Convolution
    - Dense Connections
    - Dropout
    - Global Average Pooling
    - ReLU
    - SelecSLS Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Cosine Annealing
    - Random Erasing
    Training Data:
    - ImageNet
    ID: selecsls60
    Crop Pct: '0.875'
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.99%
      Top 5 Accuracy: 93.83%
- Name: selecsls60b
  In Collection: SelecSLS
  Metadata:
    FLOPs: 4657653144
    Parameters: 32770000
    File Size: 131252898
    Architecture:
    - Batch Normalization
    - Convolution
    - Dense Connections
    - Dropout
    - Global Average Pooling
    - ReLU
    - SelecSLS Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Cosine Annealing
    - Random Erasing
    Training Data:
    - ImageNet
    ID: selecsls60b
    Crop Pct: '0.875'
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.41%
      Top 5 Accuracy: 94.18%
-->
# (Tensorflow) Inception v3

**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).

The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('tf_inception_v3', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `tf_inception_v3`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('tf_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
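The label smoothing mentioned above mixes the one-hot target with the uniform distribution over classes, so the model is never pushed toward fully confident predictions. A minimal sketch of the smoothed cross-entropy (the function name is ours; $\epsilon = 0.1$ follows the paper):

```python
import torch
import torch.nn.functional as F

def label_smoothing_ce(logits, target, smoothing=0.1):
    """Cross-entropy against targets smoothed toward the uniform distribution:
    q = (1 - eps) * one_hot(target) + eps / num_classes."""
    log_probs = F.log_softmax(logits, dim=-1)
    # negative log-likelihood of the true class
    nll = -log_probs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    # cross-entropy against the uniform distribution (mean of -log p over classes)
    uniform = -log_probs.mean(dim=-1)
    return ((1.0 - smoothing) * nll + smoothing * uniform).mean()

logits = torch.randn(4, 1000)
target = torch.randint(0, 1000, (4,))
print(label_smoothing_ce(logits, target, smoothing=0.1))
```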
## Citation

```BibTeX
@article{DBLP:journals/corr/SzegedyVISW15,
  author    = {Christian Szegedy and
               Vincent Vanhoucke and
               Sergey Ioffe and
               Jonathon Shlens and
               Zbigniew Wojna},
  title     = {Rethinking the Inception Architecture for Computer Vision},
  journal   = {CoRR},
  volume    = {abs/1512.00567},
  year      = {2015},
  url       = {http://arxiv.org/abs/1512.00567},
  archivePrefix = {arXiv},
  eprint    = {1512.00567},
  timestamp = {Mon, 13 Aug 2018 16:49:07 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

<!--
Type: model-index
Collections:
- Name: TF Inception v3
  Paper:
    Title: Rethinking the Inception Architecture for Computer Vision
    URL: https://paperswithcode.com/paper/rethinking-the-inception-architecture-for
Models:
- Name: tf_inception_v3
  In Collection: TF Inception v3
  Metadata:
    FLOPs: 7352418880
    Parameters: 23830000
    File Size: 95549439
    Architecture:
    - 1x1 Convolution
    - Auxiliary Classifier
    - Average Pooling
    - Average Pooling
    - Batch Normalization
    - Convolution
    - Dense Connections
    - Dropout
    - Inception-v3 Module
    - Max Pooling
    - ReLU
    - Softmax
    Tasks:
    - Image Classification
    Training Techniques:
    - Gradient Clipping
    - Label Smoothing
    - RMSProp
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 50x NVIDIA Kepler GPUs
    ID: tf_inception_v3
    LR: 0.045
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L449
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.87%
      Top 5 Accuracy: 93.65%
-->
# (Legacy) SE-ResNet

**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.

## How do I use this model on an image?

To load a pretrained model:

```python
import timm
model = timm.create_model('legacy_seresnet101', pretrained=True)
model.eval()
```

To load and preprocess the image:

```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=model)
transform = create_transform(**config)

url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```python
import torch
with torch.no_grad():
    out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f.readlines()]

# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `legacy_seresnet101`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```python
model = timm.create_model('legacy_seresnet101', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
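As an alternative to recreating the model with `num_classes`, an existing model instance can have its head swapped via `reset_classifier`, which is part of timm's common model interface (a quick sketch; `NUM_FINETUNE_CLASSES` is a placeholder):

```python
import timm

NUM_FINETUNE_CLASSES = 10  # hypothetical number of target classes
model = timm.create_model('legacy_seresnet101', pretrained=True)
model.reset_classifier(NUM_FINETUNE_CLASSES)  # replaces only the final classifier layer
```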
## Citation

```BibTeX
@misc{hu2019squeezeandexcitation,
    title={Squeeze-and-Excitation Networks},
    author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
    year={2019},
    eprint={1709.01507},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: Legacy SE ResNet
  Paper:
    Title: Squeeze-and-Excitation Networks
    URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: legacy_seresnet101
  In Collection: Legacy SE ResNet
  Metadata:
    FLOPs: 9762614000
    Parameters: 49330000
    File Size: 197822624
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: legacy_seresnet101
    LR: 0.6
    Epochs: 100
    Layers: 101
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L426
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.38%
      Top 5 Accuracy: 94.26%
- Name: legacy_seresnet152
  In Collection: Legacy SE ResNet
  Metadata:
    FLOPs: 14553578160
    Parameters: 66819999
    File Size: 268033864
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: legacy_seresnet152
    LR: 0.6
    Epochs: 100
    Layers: 152
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L433
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.67%
      Top 5 Accuracy: 94.38%
- Name: legacy_seresnet18
  In Collection: Legacy SE ResNet
  Metadata:
    FLOPs: 2328876024
    Parameters: 11780000
    File Size: 47175663
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: legacy_seresnet18
    LR: 0.6
    Epochs: 100
    Layers: 18
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L405
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 71.74%
      Top 5 Accuracy: 90.34%
- Name: legacy_seresnet34
  In Collection: Legacy SE ResNet
  Metadata:
    FLOPs: 4706201004
    Parameters: 21960000
    File Size: 87958697
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: legacy_seresnet34
    LR: 0.6
    Epochs: 100
    Layers: 34
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Batch Size: 1024
    Image Size: '224'
    Interpolation: bilinear
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L412
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 74.79%
      Top 5 Accuracy: 92.13%
- Name: legacy_seresnet50
  In Collection: Legacy SE ResNet
  Metadata:
    FLOPs: 4974351024
    Parameters: 28090000
    File Size: 112611220
    Architecture:
    - 1x1 Convolution
    - Batch Normalization
    - Bottleneck Residual Block
    - Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Block
    - Residual Connection
    - Softmax
    - Squeeze-and-Excitation Block
    Tasks:
    - Image Classification
    Training Techniques:
    - Label Smoothing
    - SGD with Momentum
    - Weight Decay
    Training Data:
    - ImageNet
    Training Resources: 8x NVIDIA Titan X GPUs
    ID: legacy_seresnet50
    LR: 0.6
    Epochs: 100
    Layers: 50
    Dropout: 0.2
    Crop Pct: '0.875'
    Momentum: 0.9
    Image Size: '224'
    Interpolation: bilinear
    Minibatch Size: 1024
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L419
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 77.64%
      Top 5 Accuracy: 93.74%
-->