pr-include-rev-in-flake

#2
opened by drbh (HF Staff)
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. README.md +0 -3
  2. build.toml +9 -10
  3. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_cxy6p3o2latjs.abi3.so} +2 -2
  4. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py +3 -3
  5. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_esifsbuexbtbw.abi3.so} +2 -2
  6. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py +3 -3
  7. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_cuzn3o54ku5iq.abi3.so} +2 -2
  8. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py +3 -3
  9. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_gom2c5vfrl2ic.abi3.so} +2 -2
  10. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py +3 -3
  11. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  12. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_a7sajsuqrick6.abi3.so +3 -0
  13. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py +3 -3
  14. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  15. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_tyogxwmtolvok.abi3.so +3 -0
  16. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py +3 -3
  17. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_5kxpyt5yogkv2.abi3.so +3 -0
  18. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  19. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py +3 -3
  20. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  21. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_titoehueyfqjg.abi3.so +3 -0
  22. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py +3 -3
  23. build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py +0 -46
  24. build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  25. build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py +0 -9
  26. build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/layers.py +0 -84
  27. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  28. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_imqt5tuqtmyt4.abi3.so +3 -0
  29. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py +3 -3
  30. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  31. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_qbnaho3zp2d3o.abi3.so +3 -0
  32. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py +3 -3
  33. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_5oxft6tr6jbvu.abi3.so +3 -0
  34. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  35. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py +3 -3
  36. build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/__init__.py +0 -46
  37. build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  38. build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_ops.py +0 -9
  39. build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/layers.py +0 -84
  40. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  41. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_po264mz2i2ffg.abi3.so +3 -0
  42. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py +3 -3
  43. build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py +0 -46
  44. build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  45. build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py +0 -9
  46. build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/layers.py +0 -84
  47. build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py +0 -46
  48. build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so +0 -3
  49. build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py +0 -9
  50. build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/layers.py +0 -84
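Taken together, the visible changes rename the hash-suffixed `_deformable_detr_*` extension modules in the x86_64 builds, delete the aarch64 build directories (plus the torch27 cu118 variant listed above), trim the README, and rework `build.toml`. Because the view is capped at 50 files, this summary may not cover the whole diff. A hypothetical helper (not part of this repository) for sanity-checking which build variants remain in a checkout:

```python
# Hypothetical helper, not part of this repo: list the torch/CUDA/arch build
# variants present under build/ so the deletions above can be sanity-checked.
from pathlib import Path


def list_build_variants(repo_root: str = ".") -> list[str]:
    """Return the variant directories under build/, sorted by name."""
    build_dir = Path(repo_root) / "build"
    if not build_dir.is_dir():
        return []
    return sorted(p.name for p in build_dir.iterdir() if p.is_dir())


if __name__ == "__main__":
    for variant in list_build_variants():
        print(variant)  # e.g. torch25-cxx11-cu118-x86_64-linux
```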
README.md CHANGED
@@ -4,6 +4,3 @@ tags:
  - kernel
  ---
  
- ![Status](https://hubwebhook.dholtz.com/shield?repo=kernels-community/deformable-detr)
- 
- ## deformable-detr
build.toml CHANGED
@@ -1,20 +1,19 @@
  [general]
  name = "deformable_detr"
- universal = false
  
  [torch]
  src = [
- "torch-ext/torch_binding.cpp",
- "torch-ext/torch_binding.h",
+ "torch-ext/torch_binding.cpp",
+ "torch-ext/torch_binding.h"
  ]
  
  [kernel.activation]
- backend = "cuda"
- depends = ["torch"]
- include = ["."]
+ cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ]
  src = [
- "deformable_detr/ms_deform_attn_cuda.cu",
- "deformable_detr/ms_deform_im2col_cuda.cuh",
- "deformable_detr/ms_deform_attn_cuda.cuh",
- "deformable_detr/ms_deform_attn_cuda.h",
+ "deformable_detr/ms_deform_attn_cuda.cu",
+ "deformable_detr/ms_deform_im2col_cuda.cuh",
+ "deformable_detr/ms_deform_attn_cuda.cuh",
+ "deformable_detr/ms_deform_attn_cuda.h",
  ]
+ include = ["."]
+ depends = [ "torch" ]
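The reworked `[kernel.activation]` table now declares the CUDA compute capabilities the kernel is built for. A minimal sketch (an assumption, not part of this repository) that checks whether the local GPU is covered by that list:

```python
# Hedged sketch: compare the local GPU's compute capability against the
# cuda-capabilities values declared in the new build.toml above.
import torch

DECLARED_CAPABILITIES = {"7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0"}

if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability()
    cap = f"{major}.{minor}"
    status = "covered by" if cap in DECLARED_CAPABILITIES else "missing from"
    print(f"compute capability {cap} is {status} the declared list")
else:
    print("no CUDA device visible")
```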
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_cxy6p3o2latjs.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae00c12295a458e2534149aea16da0289541447123c19fae59baaf6d6d2752f1
- size 6693656
+ oid sha256:1cf71a0243675c22ba3207a6f895a907b0699f964575088e054220cea5e2fb2e
+ size 5870376
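The shared objects are tracked with Git LFS, so the `.abi3.so` entries above and below only change three-line pointer files (version, oid, size), not reviewable binaries. A small self-contained sketch that parses such a pointer, using the new pointer's values from this diff:

```python
# Parse a git-lfs pointer file into its key/value fields; the sample text is
# copied from the new pointer shown in the diff above.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())


pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:1cf71a0243675c22ba3207a6f895a907b0699f964575088e054220cea5e2fb2e\n"
    "size 5870376\n"
)
fields = parse_lfs_pointer(pointer)
print(fields["oid"], fields["size"])  # content hash and size in bytes
```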
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_cxy6p3o2latjs
+ ops = torch.ops._deformable_detr_cxy6p3o2latjs
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_cxy6p3o2latjs::{op_name}"
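Every `_ops.py` rebinds `torch.ops` to the hash-suffixed extension module, so these files change only because the suffix changes; the PR title suggests the build identifier now folds in the flake revision, which is an inference from the title rather than something stated in the diff. A self-contained sketch of the namespacing pattern, with the suffix copied from the diff and everything else illustrative:

```python
# Illustrative sketch of the namespacing pattern used by each _ops.py: ops are
# registered under a build-specific namespace, and helpers build the fully
# qualified op name. Only the suffix string is taken from the diff.
BUILD_NAMESPACE = "_deformable_detr_cxy6p3o2latjs"


def add_op_namespace_prefix(op_name: str, namespace: str = BUILD_NAMESPACE) -> str:
    """Prefix an op name with the build-specific namespace."""
    return f"{namespace}::{op_name}"


assert add_op_namespace_prefix("ms_deform_attn_forward") == (
    "_deformable_detr_cxy6p3o2latjs::ms_deform_attn_forward"
)
```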
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_esifsbuexbtbw.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ab8cf59779b768359df0fa268b6cd52be2f518dd4fafdd61baec31c64f44813
- size 6679440
+ oid sha256:79dce2e84e09fb2a5bf1b47441b226343494807687d8829f141682af9b78e361
+ size 5856160
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_esifsbuexbtbw
+ ops = torch.ops._deformable_detr_esifsbuexbtbw
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_esifsbuexbtbw::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_cuzn3o54ku5iq.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5cdcd6902a03140074cff4cd44bf6b47dc27a32e13e0515a93929c66be186cab
- size 6652680
+ oid sha256:825c7cb6f9a4350bdcdffa4383d7a527d5fa7b0d9d83222f5d1e72f1c6087841
+ size 5841688
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_cuzn3o54ku5iq
+ ops = torch.ops._deformable_detr_cuzn3o54ku5iq
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_cuzn3o54ku5iq::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_gom2c5vfrl2ic.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:82174ec2812ee672a447b94fb5ec907e348eb3d0be338daddf145a1d74969a6f
- size 6686592
+ oid sha256:dbe4c67fc885df711581660f72d86dbd0a237c7f106308e55a484725c88e9927
+ size 5863312
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_gom2c5vfrl2ic
+ ops = torch.ops._deformable_detr_gom2c5vfrl2ic
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_gom2c5vfrl2ic::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c9d8540a4ffa00d331f60204fe6baf543a45667d6bba2c0a0b23aca9202b6233
- size 6672464
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_a7sajsuqrick6.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56b4c64eb7931a6f580bd5b806eae1aea43b3bb8c0f115d5d202f151974a5e7b
+ size 5853280
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_a7sajsuqrick6
+ ops = torch.ops._deformable_detr_a7sajsuqrick6
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_a7sajsuqrick6::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fa748a4de72c06de09f46b4af4fec7f23cb2c76eb8683c117fefd20833cd3fd8
- size 6649800
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_tyogxwmtolvok.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eef07a96ddf574e5b1e07476089a62659a70faa33c82fc79987c54fecb2711f
+ size 5834712
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_tyogxwmtolvok
+ ops = torch.ops._deformable_detr_tyogxwmtolvok
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_tyogxwmtolvok::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_5kxpyt5yogkv2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1c5bb5376002363e2008eb6db64ebe0c9f6c31f9a635b7420ddfb46dce16b02
+ size 5870352
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:57e38bac3087c1446307e504b1e22e61ae584d1de7f5b3d15bd7a60780c3431c
- size 6693632
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_5kxpyt5yogkv2
+ ops = torch.ops._deformable_detr_5kxpyt5yogkv2
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_5kxpyt5yogkv2::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1d7c09d3bedd89d7119e7023a07784724d3a3f79664b75fce37b778ef3bcfe52
- size 6648656
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_titoehueyfqjg.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76b74d4bdbb1f562474b987fd23430d12b9f033183198f35a7dfd21fcc8ce4e1
+ size 5837664
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_titoehueyfqjg
+ ops = torch.ops._deformable_detr_titoehueyfqjg
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_titoehueyfqjg::{op_name}"
build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py DELETED
@@ -1,46 +0,0 @@
- from typing import List
- import torch
- 
- from ._ops import ops
- from . import layers
- 
- 
- def ms_deform_attn_backward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     grad_output: torch.Tensor,
-     im2col_step: int,
- ) -> List[torch.Tensor]:
-     return ops.ms_deform_attn_backward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         grad_output,
-         im2col_step,
-     )
- 
- 
- def ms_deform_attn_forward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     im2col_step: int,
- ) -> torch.Tensor:
-     return ops.ms_deform_attn_forward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         im2col_step,
-     )
- 
- 
- __all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
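The aarch64 wrapper above is deleted, but the same Python surface remains in the x86_64 builds. A hedged usage sketch of `ms_deform_attn_forward`; the tensor shapes follow the common multi-scale deformable attention convention and are assumptions here, not taken from this repository's documentation:

```python
# Hedged usage sketch; shapes are assumptions (value: [N, Len_in, heads, dim],
# sampling_loc: [N, Len_q, heads, levels, points, 2], attn_weight without the
# trailing 2). Guarded so it is a no-op when the extension or a GPU is absent.
import torch

try:
    from deformable_detr import ms_deform_attn_forward
except ImportError:
    ms_deform_attn_forward = None  # kernel not installed on this platform

if ms_deform_attn_forward is not None and torch.cuda.is_available():
    n, heads, dim, points = 1, 8, 16, 4
    spatial_shapes = torch.tensor([[8, 8]], device="cuda")    # one 8x8 level
    level_start_index = torch.tensor([0], device="cuda")
    len_in = 64                                                # sum of H*W
    value = torch.rand(n, len_in, heads, dim, device="cuda")
    sampling_loc = torch.rand(n, len_in, heads, 1, points, 2, device="cuda")
    attn_weight = torch.rand(n, len_in, heads, 1, points, device="cuda").softmax(-1)
    out = ms_deform_attn_forward(value, spatial_shapes, level_start_index,
                                 sampling_loc, attn_weight, 64)
    print(out.shape)  # expected (n, len_in, heads * dim)
```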
build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6856b6efe5130f019f6cb7f964d7a2073f1ecc5cd7afc850334e64798f871dae
- size 6833224
build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
- 
- def add_op_namespace_prefix(op_name: str):
-     """
-     Prefix op by namespace.
-     """
-     return f"_deformable_detr_7c33cbe::{op_name}"
build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/layers.py DELETED
@@ -1,84 +0,0 @@
- from typing import List, Union, Tuple
- 
- from torch import Tensor
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- import torch.nn as nn
- 
- from ._ops import ops
- 
- 
- class MultiScaleDeformableAttentionFunction(Function):
-     @staticmethod
-     def forward(
-         context,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         context.im2col_step = im2col_step
-         output = ops.ms_deform_attn_forward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             context.im2col_step,
-         )
-         context.save_for_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         )
-         return output
- 
-     @staticmethod
-     @once_differentiable
-     def backward(context, grad_output):
-         (
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         ) = context.saved_tensors
-         grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             grad_output,
-             context.im2col_step,
-         )
- 
-         return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
- 
- 
- class MultiScaleDeformableAttention(nn.Module):
-     def forward(
-         self,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_spatial_shapes_list: List[Tuple],
-         level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         return MultiScaleDeformableAttentionFunction.apply(
-             value,
-             value_spatial_shapes,
-             level_start_index,
-             sampling_locations,
-             attention_weights,
-             im2col_step,
-         )
- 
- 
- __all__ = ["MultiScaleDeformableAttention"]
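The deleted `layers.py` wraps the CUDA ops in a `torch.autograd.Function` whose backward is marked `@once_differentiable`, since the custom backward is not itself differentiable. A minimal, self-contained illustration of that pattern; the scale-by-two op is purely an assumption for demonstration:

```python
# Toy autograd.Function mirroring the structure of the deleted
# MultiScaleDeformableAttentionFunction: save inputs in forward, mark backward
# @once_differentiable, and return one gradient per forward argument.
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable


class ScaleByTwo(Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)  # mirrors context.save_for_backward above
        return x * 2

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return grad_output * 2    # d(2x)/dx


x = torch.randn(3, requires_grad=True)
ScaleByTwo.apply(x).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
```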
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c76ad874b78882d3108a7fdaf49f8c00b6a6a7dceec63912118f8fa7d07e5f30
- size 6800656
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_imqt5tuqtmyt4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1acd032c2f3bc530872e0839d8bec8950b01668c913539a2e14008a1e652560f
+ size 5944608
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_imqt5tuqtmyt4
+ ops = torch.ops._deformable_detr_imqt5tuqtmyt4
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_imqt5tuqtmyt4::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d1fb3a24fd95c1cc3cba080ae1c9d4217f377435770c7e423de53b11ecc437dc
- size 6686600
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_qbnaho3zp2d3o.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9e5074a5afdb137688e20182cf4c9f7cbb1e8a69651c08a570076aeedc8c76b
+ size 5863320
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_qbnaho3zp2d3o
+ ops = torch.ops._deformable_detr_qbnaho3zp2d3o
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_qbnaho3zp2d3o::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_5oxft6tr6jbvu.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd4d0f47c165b9ce95c0328cb7a52e331e4c698746ea8e4d43c7d09c193e34bd
+ size 5834720
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d5a455975be5790964cc95c6813d293b1aba581f5c2dc132c9a08690bf6e5cad
- size 6649808
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_5oxft6tr6jbvu
+ ops = torch.ops._deformable_detr_5oxft6tr6jbvu
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_5oxft6tr6jbvu::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/__init__.py DELETED
@@ -1,46 +0,0 @@
- from typing import List
- import torch
- 
- from ._ops import ops
- from . import layers
- 
- 
- def ms_deform_attn_backward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     grad_output: torch.Tensor,
-     im2col_step: int,
- ) -> List[torch.Tensor]:
-     return ops.ms_deform_attn_backward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         grad_output,
-         im2col_step,
-     )
- 
- 
- def ms_deform_attn_forward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     im2col_step: int,
- ) -> torch.Tensor:
-     return ops.ms_deform_attn_forward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         im2col_step,
-     )
- 
- 
- __all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2c13dce2b080676eb192d87ba83df6ef1f6d0f1101727f4b29185d48dec7281d
- size 6829872
build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
- 
- def add_op_namespace_prefix(op_name: str):
-     """
-     Prefix op by namespace.
-     """
-     return f"_deformable_detr_7c33cbe::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/layers.py DELETED
@@ -1,84 +0,0 @@
- from typing import List, Union, Tuple
- 
- from torch import Tensor
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- import torch.nn as nn
- 
- from ._ops import ops
- 
- 
- class MultiScaleDeformableAttentionFunction(Function):
-     @staticmethod
-     def forward(
-         context,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         context.im2col_step = im2col_step
-         output = ops.ms_deform_attn_forward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             context.im2col_step,
-         )
-         context.save_for_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         )
-         return output
- 
-     @staticmethod
-     @once_differentiable
-     def backward(context, grad_output):
-         (
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         ) = context.saved_tensors
-         grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             grad_output,
-             context.im2col_step,
-         )
- 
-         return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
- 
- 
- class MultiScaleDeformableAttention(nn.Module):
-     def forward(
-         self,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_spatial_shapes_list: List[Tuple],
-         level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         return MultiScaleDeformableAttentionFunction.apply(
-             value,
-             value_spatial_shapes,
-             level_start_index,
-             sampling_locations,
-             attention_weights,
-             im2col_step,
-         )
- 
- 
- __all__ = ["MultiScaleDeformableAttention"]
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c6ad344319579f0abef7fe1a9d3f479f1c8737994f563a540815a1445020959e
- size 6797712
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_po264mz2i2ffg.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:129844ba533ee201cd3f2bb0e17a354ee8aa35176c10896454926485acdacdac
+ size 5945760
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
+ from . import _deformable_detr_po264mz2i2ffg
+ ops = torch.ops._deformable_detr_po264mz2i2ffg
  
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_deformable_detr_7c33cbe::{op_name}"
+     return f"_deformable_detr_po264mz2i2ffg::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py DELETED
@@ -1,46 +0,0 @@
- from typing import List
- import torch
- 
- from ._ops import ops
- from . import layers
- 
- 
- def ms_deform_attn_backward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     grad_output: torch.Tensor,
-     im2col_step: int,
- ) -> List[torch.Tensor]:
-     return ops.ms_deform_attn_backward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         grad_output,
-         im2col_step,
-     )
- 
- 
- def ms_deform_attn_forward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     im2col_step: int,
- ) -> torch.Tensor:
-     return ops.ms_deform_attn_forward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         im2col_step,
-     )
- 
- 
- __all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:20ee462e214e11abd65b44259847f7dbf535b94a3003357251c53a72f4ac4392
- size 6693728
build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
- 
- def add_op_namespace_prefix(op_name: str):
-     """
-     Prefix op by namespace.
-     """
-     return f"_deformable_detr_7c33cbe::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/layers.py DELETED
@@ -1,84 +0,0 @@
- from typing import List, Union, Tuple
- 
- from torch import Tensor
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- import torch.nn as nn
- 
- from ._ops import ops
- 
- 
- class MultiScaleDeformableAttentionFunction(Function):
-     @staticmethod
-     def forward(
-         context,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         context.im2col_step = im2col_step
-         output = ops.ms_deform_attn_forward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             context.im2col_step,
-         )
-         context.save_for_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         )
-         return output
- 
-     @staticmethod
-     @once_differentiable
-     def backward(context, grad_output):
-         (
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         ) = context.saved_tensors
-         grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             grad_output,
-             context.im2col_step,
-         )
- 
-         return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
- 
- 
- class MultiScaleDeformableAttention(nn.Module):
-     def forward(
-         self,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_spatial_shapes_list: List[Tuple],
-         level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         return MultiScaleDeformableAttentionFunction.apply(
-             value,
-             value_spatial_shapes,
-             level_start_index,
-             sampling_locations,
-             attention_weights,
-             im2col_step,
-         )
- 
- 
- __all__ = ["MultiScaleDeformableAttention"]
build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py DELETED
@@ -1,46 +0,0 @@
- from typing import List
- import torch
- 
- from ._ops import ops
- from . import layers
- 
- 
- def ms_deform_attn_backward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     grad_output: torch.Tensor,
-     im2col_step: int,
- ) -> List[torch.Tensor]:
-     return ops.ms_deform_attn_backward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         grad_output,
-         im2col_step,
-     )
- 
- 
- def ms_deform_attn_forward(
-     value: torch.Tensor,
-     spatial_shapes: torch.Tensor,
-     level_start_index: torch.Tensor,
-     sampling_loc: torch.Tensor,
-     attn_weight: torch.Tensor,
-     im2col_step: int,
- ) -> torch.Tensor:
-     return ops.ms_deform_attn_forward(
-         value,
-         spatial_shapes,
-         level_start_index,
-         sampling_loc,
-         attn_weight,
-         im2col_step,
-     )
- 
- 
- __all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:af2831b68229a910e8703cae2c9e720ded825e401745d38923548c444e56c37b
- size 6833456
build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _deformable_detr_7c33cbe
- ops = torch.ops._deformable_detr_7c33cbe
- 
- def add_op_namespace_prefix(op_name: str):
-     """
-     Prefix op by namespace.
-     """
-     return f"_deformable_detr_7c33cbe::{op_name}"
build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/layers.py DELETED
@@ -1,84 +0,0 @@
- from typing import List, Union, Tuple
- 
- from torch import Tensor
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- import torch.nn as nn
- 
- from ._ops import ops
- 
- 
- class MultiScaleDeformableAttentionFunction(Function):
-     @staticmethod
-     def forward(
-         context,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         context.im2col_step = im2col_step
-         output = ops.ms_deform_attn_forward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             context.im2col_step,
-         )
-         context.save_for_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         )
-         return output
- 
-     @staticmethod
-     @once_differentiable
-     def backward(context, grad_output):
-         (
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-         ) = context.saved_tensors
-         grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-             value,
-             value_spatial_shapes,
-             value_level_start_index,
-             sampling_locations,
-             attention_weights,
-             grad_output,
-             context.im2col_step,
-         )
- 
-         return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
- 
- 
- class MultiScaleDeformableAttention(nn.Module):
-     def forward(
-         self,
-         value: Tensor,
-         value_spatial_shapes: Tensor,
-         value_spatial_shapes_list: List[Tuple],
-         level_start_index: Tensor,
-         sampling_locations: Tensor,
-         attention_weights: Tensor,
-         im2col_step: int,
-     ):
-         return MultiScaleDeformableAttentionFunction.apply(
-             value,
-             value_spatial_shapes,
-             level_start_index,
-             sampling_locations,
-             attention_weights,
-             im2col_step,
-         )
- 
- 
- __all__ = ["MultiScaleDeformableAttention"]