pr-include-rev-in-flake
#2
by drbh HF Staff - opened
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +0 -3
- README.md +2 -22
- benchmarks/benchmark.py +0 -250
- build.toml +19 -0
- build/torch210-cu128-x86_64-windows/__init__.py +0 -46
- build/torch210-cu128-x86_64-windows/_ops.py +0 -9
- build/torch210-cu128-x86_64-windows/deformable_detr/__init__.py +0 -26
- build/torch210-cu128-x86_64-windows/layers.py +0 -84
- build/torch210-cu128-x86_64-windows/metadata.json +0 -21
- build/torch210-cxx11-cu126-aarch64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu126-aarch64-linux/metadata.json +0 -20
- build/torch210-cxx11-cu126-x86_64-linux/__init__.py +0 -46
- build/torch210-cxx11-cu126-x86_64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu126-x86_64-linux/layers.py +0 -84
- build/torch210-cxx11-cu126-x86_64-linux/metadata.json +0 -20
- build/torch210-cxx11-cu128-aarch64-linux/__init__.py +0 -46
- build/torch210-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_5129df0.abi3.so +0 -3
- build/torch210-cxx11-cu128-aarch64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu128-aarch64-linux/layers.py +0 -84
- build/torch210-cxx11-cu128-aarch64-linux/metadata.json +0 -23
- build/torch210-cxx11-cu128-x86_64-linux/__init__.py +0 -46
- build/torch210-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_5129df0.abi3.so +0 -3
- build/torch210-cxx11-cu128-x86_64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu128-x86_64-linux/layers.py +0 -84
- build/torch210-cxx11-cu128-x86_64-linux/metadata.json +0 -23
- build/torch210-cxx11-cu130-aarch64-linux/__init__.py +0 -46
- build/torch210-cxx11-cu130-aarch64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu130-aarch64-linux/layers.py +0 -84
- build/torch210-cxx11-cu130-aarch64-linux/metadata.json +0 -21
- build/torch210-cxx11-cu130-x86_64-linux/__init__.py +0 -46
- build/torch210-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_5129df0.abi3.so +0 -3
- build/torch210-cxx11-cu130-x86_64-linux/_ops.py +0 -9
- build/torch210-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py +0 -26
- build/torch210-cxx11-cu130-x86_64-linux/layers.py +0 -84
- build/torch210-cxx11-cu130-x86_64-linux/metadata.json +0 -21
- build/torch211-cu128-x86_64-windows/__init__.py +0 -46
- build/torch211-cu128-x86_64-windows/_deformable_detr_cuda_a4c5c25.pyd +0 -3
- build/torch211-cu128-x86_64-windows/_ops.py +0 -9
- build/torch211-cu128-x86_64-windows/deformable_detr/__init__.py +0 -26
- build/torch211-cu128-x86_64-windows/layers.py +0 -84
- build/torch211-cu128-x86_64-windows/metadata.json +0 -21
- build/torch211-cxx11-cu126-aarch64-linux/__init__.py +0 -46
- build/torch211-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_5129df0.abi3.so +0 -3
- build/torch211-cxx11-cu126-aarch64-linux/_ops.py +0 -9
- build/torch211-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py +0 -26
.gitattributes
CHANGED
@@ -1,4 +1 @@
 *.so filter=lfs diff=lfs merge=lfs -text
-build/torch210-cu128-x86_64-windows/_deformable_detr_cuda_d8a6191.pyd filter=lfs diff=lfs merge=lfs -text
-build/torch211-cu128-x86_64-windows/_deformable_detr_cuda_ff2cd18.pyd filter=lfs diff=lfs merge=lfs -text
-build/torch211-cu128-x86_64-windows/_deformable_detr_cuda_a4c5c25.pyd filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,26 +1,6 @@
 ---
-library_name: kernels
 license: apache-2.0
+tags:
+- kernel
 ---
 
-This is the repository card of kernels-community/deformable-detr that has been pushed on the Hub. It was built to be used with the [`kernels` library](https://github.com/huggingface/kernels). This card was automatically generated.
-
-## How to use
-
-```python
-# make sure `kernels` is installed: `pip install -U kernels`
-from kernels import get_kernel
-
-kernel_module = get_kernel("kernels-community/deformable-detr")
-ms_deform_attn_forward = kernel_module.ms_deform_attn_forward
-
-ms_deform_attn_forward(...)
-```
-
-## Available functions
-- `ms_deform_attn_forward`
-- `ms_deform_attn_backward`
-
-## Benchmarks
-
-Benchmarking script is available for this kernel. Run `kernels benchmark kernels-community/deformable-detr`.
benchmarks/benchmark.py
DELETED
@@ -1,250 +0,0 @@
import torch
import torch.nn.functional as F

from kernels.benchmark import Benchmark


def ms_deform_attn_reference(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_locations: torch.Tensor,
    attention_weights: torch.Tensor,
) -> torch.Tensor:
    batch, _, num_heads, channels = value.shape
    _, num_query, _, num_levels, num_points, _ = sampling_locations.shape

    # Split value by levels
    value_list = []
    for level_id in range(num_levels):
        H, W = spatial_shapes[level_id]
        start_idx = level_start_index[level_id]
        end_idx = (
            level_start_index[level_id + 1]
            if level_id < num_levels - 1
            else value.shape[1]
        )
        # (batch, H*W, num_heads, channels) -> (batch, num_heads, channels, H, W)
        value_level = value[:, start_idx:end_idx, :, :].view(
            batch, H, W, num_heads, channels
        )
        value_level = value_level.permute(0, 3, 4, 1, 2).contiguous()
        value_list.append(value_level)

    # Sample from each level
    output = torch.zeros(
        batch, num_query, num_heads, channels, device=value.device, dtype=value.dtype
    )

    for level_id in range(num_levels):
        H, W = spatial_shapes[level_id]
        value_level = value_list[level_id]  # (batch, num_heads, channels, H, W)

        # Get sampling locations for this level: (batch, num_query, num_heads, num_points, 2)
        sampling_loc_level = sampling_locations[:, :, :, level_id, :, :]

        # Convert from [0, 1] to [-1, 1] for grid_sample
        grid = (
            2.0 * sampling_loc_level - 1.0
        )  # (batch, num_query, num_heads, num_points, 2)

        # Reshape for grid_sample: need (batch * num_heads, channels, H, W) and (batch * num_heads, num_query, num_points, 2)
        value_level = value_level.view(batch * num_heads, channels, H.item(), W.item())
        grid = grid.permute(
            0, 2, 1, 3, 4
        ).contiguous()  # (batch, num_heads, num_query, num_points, 2)
        grid = grid.view(batch * num_heads, num_query, num_points, 2)

        # Sample: output is (batch * num_heads, channels, num_query, num_points)
        sampled = F.grid_sample(
            value_level,
            grid,
            mode="bilinear",
            padding_mode="zeros",
            align_corners=False,
        )

        # Reshape back: (batch, num_heads, channels, num_query, num_points)
        sampled = sampled.view(batch, num_heads, channels, num_query, num_points)
        # -> (batch, num_query, num_heads, num_points, channels)
        sampled = sampled.permute(0, 3, 1, 4, 2).contiguous()

        # Get attention weights for this level: (batch, num_query, num_heads, num_points)
        attn_level = attention_weights[:, :, :, level_id, :]

        # Weighted sum over points: (batch, num_query, num_heads, channels)
        output += (sampled * attn_level.unsqueeze(-1)).sum(dim=3)

    # Reshape to (batch, num_query, num_heads * channels)
    output = output.view(batch, num_query, num_heads * channels)
    return output


class MSDeformAttnBenchmark(Benchmark):
    seed: int = 42

    def setup(self):
        batch = 2
        num_heads = 8
        channels = 32  # embed_dim = num_heads * channels = 256
        num_levels = 4
        num_query = 300
        num_points = 4
        im2col_step = 64

        # Spatial shapes for 4 levels: 64x64, 32x32, 16x16, 8x8
        spatial_shapes = torch.tensor(
            [[64, 64], [32, 32], [16, 16], [8, 8]],
            dtype=torch.int64,
            device=self.device,
        )
        # Calculate spatial_size = sum of H*W for all levels
        spatial_size = (64 * 64) + (32 * 32) + (16 * 16) + (8 * 8)  # 5440

        # Level start indices
        level_start_index = torch.tensor(
            [0, 64 * 64, 64 * 64 + 32 * 32, 64 * 64 + 32 * 32 + 16 * 16],
            dtype=torch.int64,
            device=self.device,
        )

        self.value = torch.randn(
            batch,
            spatial_size,
            num_heads,
            channels,
            device=self.device,
            dtype=torch.float32,
        )
        self.spatial_shapes = spatial_shapes
        self.level_start_index = level_start_index
        self.sampling_loc = torch.rand(
            batch,
            num_query,
            num_heads,
            num_levels,
            num_points,
            2,
            device=self.device,
            dtype=torch.float32,
        )
        self.attn_weight = torch.rand(
            batch,
            num_query,
            num_heads,
            num_levels,
            num_points,
            device=self.device,
            dtype=torch.float32,
        )
        # Normalize attention weights
        self.attn_weight = self.attn_weight / self.attn_weight.sum(-1, keepdim=True)
        self.im2col_step = im2col_step

        self.out = torch.empty(
            batch,
            num_query,
            num_heads * channels,
            device=self.device,
            dtype=torch.float32,
        )

    def benchmark_forward(self):
        self.out = self.kernel.ms_deform_attn_forward(
            self.value,
            self.spatial_shapes,
            self.level_start_index,
            self.sampling_loc,
            self.attn_weight,
            self.im2col_step,
        )

    def verify_forward(self) -> torch.Tensor:
        return ms_deform_attn_reference(
            self.value,
            self.spatial_shapes,
            self.level_start_index,
            self.sampling_loc,
            self.attn_weight,
        )

    def setup_large(self):
        batch = 8
        num_heads = 8
        channels = 32
        num_levels = 4
        num_query = 900
        num_points = 4
        im2col_step = 64

        spatial_shapes = torch.tensor(
            [[64, 64], [32, 32], [16, 16], [8, 8]],
            dtype=torch.int64,
            device=self.device,
        )
        spatial_size = (64 * 64) + (32 * 32) + (16 * 16) + (8 * 8)

        level_start_index = torch.tensor(
            [0, 64 * 64, 64 * 64 + 32 * 32, 64 * 64 + 32 * 32 + 16 * 16],
            dtype=torch.int64,
            device=self.device,
        )

        self.value = torch.randn(
            batch,
            spatial_size,
            num_heads,
            channels,
            device=self.device,
            dtype=torch.float32,
        )
        self.spatial_shapes = spatial_shapes
        self.level_start_index = level_start_index
        self.sampling_loc = torch.rand(
            batch,
            num_query,
            num_heads,
            num_levels,
            num_points,
            2,
            device=self.device,
            dtype=torch.float32,
        )
        self.attn_weight = torch.rand(
            batch,
            num_query,
            num_heads,
            num_levels,
            num_points,
            device=self.device,
            dtype=torch.float32,
        )
        self.attn_weight = self.attn_weight / self.attn_weight.sum(-1, keepdim=True)
        self.im2col_step = im2col_step

        self.out = torch.empty(
            batch,
            num_query,
            num_heads * channels,
            device=self.device,
            dtype=torch.float32,
        )

    def benchmark_large(self):
        self.out = self.kernel.ms_deform_attn_forward(
            self.value,
            self.spatial_shapes,
            self.level_start_index,
            self.sampling_loc,
            self.attn_weight,
            self.im2col_step,
        )

    def verify_large(self) -> torch.Tensor:
        return ms_deform_attn_reference(
            self.value,
            self.spatial_shapes,
            self.level_start_index,
            self.sampling_loc,
            self.attn_weight,
        )
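Aside (not part of this PR): the deleted reference implementation is pure PyTorch, so it can be exercised on CPU without the CUDA extension. A minimal sketch, with shapes copied from the deleted `setup()` and `ms_deform_attn_reference` assumed to be the function above:

```python
import torch

batch, num_heads, channels, num_levels, num_query, num_points = 2, 8, 32, 4, 300, 4
spatial_shapes = torch.tensor([[64, 64], [32, 32], [16, 16], [8, 8]], dtype=torch.int64)
areas = spatial_shapes[:, 0] * spatial_shapes[:, 1]
# [0, 4096, 5120, 5376] -- same values setup() spells out by hand
level_start_index = torch.cat([torch.zeros(1, dtype=torch.int64), areas.cumsum(0)[:-1]])

value = torch.randn(batch, int(areas.sum()), num_heads, channels)  # spatial_size = 5440
sampling_loc = torch.rand(batch, num_query, num_heads, num_levels, num_points, 2)
attn_weight = torch.rand(batch, num_query, num_heads, num_levels, num_points)
attn_weight = attn_weight / attn_weight.sum(-1, keepdim=True)  # normalize over points

out = ms_deform_attn_reference(value, spatial_shapes, level_start_index, sampling_loc, attn_weight)
assert out.shape == (batch, num_query, num_heads * channels)  # (2, 300, 256)
```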
build.toml
ADDED
@@ -0,0 +1,19 @@
[general]
name = "deformable_detr"

[torch]
src = [
  "torch-ext/torch_binding.cpp",
  "torch-ext/torch_binding.h"
]

[kernel.activation]
cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ]
src = [
  "deformable_detr/ms_deform_attn_cuda.cu",
  "deformable_detr/ms_deform_im2col_cuda.cuh",
  "deformable_detr/ms_deform_attn_cuda.cuh",
  "deformable_detr/ms_deform_attn_cuda.h",
]
include = ["."]
depends = [ "torch" ]
build/torch210-cu128-x86_64-windows/__init__.py
DELETED
@@ -1,46 +0,0 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    return ops.ms_deform_attn_backward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    return ops.ms_deform_attn_forward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch210-cu128-x86_64-windows/_ops.py
DELETED
@@ -1,9 +0,0 @@
import torch
from . import _deformable_detr_cuda_d8a6191
ops = torch.ops._deformable_detr_cuda_d8a6191

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_deformable_detr_cuda_d8a6191::{op_name}"
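The `_ops.py` shim binds the versioned extension under `torch.ops`, and the helper simply prepends that namespace to an op name. An illustrative call (output derived directly from the f-string above):

```python
# Fully qualified name used when looking up ops registered by the shared object.
assert add_op_namespace_prefix("ms_deform_attn_forward") == (
    "_deformable_detr_cuda_d8a6191::ms_deform_attn_forward"
)
```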
build/torch210-cu128-x86_64-windows/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
import ctypes
import sys

import importlib
from pathlib import Path
from types import ModuleType

def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
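This shim re-exports the parent package by loading its `__init__.py` under a path-derived module name, so repeated loads of different files cannot collide in `sys.modules`. (Note this Windows copy imports `importlib` rather than `importlib.util`, relying on the submodule having been imported elsewhere; the Linux copies below import `importlib.util` directly.) A self-contained sketch of the same pattern on a throwaway file (toy path, not the PR's):

```python
import ctypes
import importlib.util
import sys
import tempfile
from pathlib import Path

# Toy module on disk, standing in for the package's parent __init__.py.
src = Path(tempfile.mkdtemp()) / "mod.py"
src.write_text("answer = 42\n")

# Hex-encoded hash of the absolute path -> unique, collision-free module name.
module_name = "{:x}".format(ctypes.c_size_t(hash(src.absolute())).value)
spec = importlib.util.spec_from_file_location(module_name, src)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)

print(module.answer)  # 42
```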
build/torch210-cu128-x86_64-windows/layers.py
DELETED
@@ -1,84 +0,0 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        context.im2col_step = im2col_step
        output = ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            context.im2col_step,
        )
        context.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        ) = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output,
            context.im2col_step,
        )

        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        return MultiScaleDeformableAttentionFunction.apply(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
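A hedged usage sketch (requires the compiled CUDA extension; tensors as in the CPU sketch after `benchmarks/benchmark.py` above). Note the `nn.Module` wrapper accepts `value_spatial_shapes_list` but does not forward it to the autograd `Function`:

```python
import torch

attn = MultiScaleDeformableAttention()
out = attn(
    value.cuda(),
    spatial_shapes.cuda(),
    [(64, 64), (32, 32), (16, 16), (8, 8)],  # accepted but unused by the wrapper
    level_start_index.cuda(),
    sampling_loc.cuda().requires_grad_(),
    attn_weight.cuda().requires_grad_(),
    64,  # im2col_step
)
out.sum().backward()  # once_differentiable backward fills the leaf gradients
```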
build/torch210-cu128-x86_64-windows/metadata.json
DELETED
@@ -1,21 +0,0 @@
{
  "version": 1,
  "license": "Apache-2.0",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "10.1",
      "12.0+PTX",
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu126-aarch64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
import torch
from . import _deformable_detr_cuda_5129df0
ops = torch.ops._deformable_detr_cuda_5129df0

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_deformable_detr_cuda_5129df0::{op_name}"
build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu126-aarch64-linux/metadata.json
DELETED
@@ -1,20 +0,0 @@
{
  "name": "deformable-detr",
  "id": "_deformable_detr_cuda_5129df0",
  "version": 1,
  "license": "Apache-2.0",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0+PTX"
    ]
  }
}
build/torch210-cxx11-cu126-x86_64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/__init__.py, shown above)
build/torch210-cxx11-cu126-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/_ops.py, shown above)
build/torch210-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py, shown above)
build/torch210-cxx11-cu126-x86_64-linux/layers.py
DELETED
@@ -1,84 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/layers.py, shown above)
build/torch210-cxx11-cu126-x86_64-linux/metadata.json
DELETED
@@ -1,20 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/metadata.json, shown above)
build/torch210-cxx11-cu128-aarch64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/__init__.py, shown above)
build/torch210-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_5129df0.abi3.so
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e47fb27665b884b9862ad0c7b172c518e858894ab76aa850b35456c2aaeefc7
size 11621120
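Aside (not part of this PR): the deleted `.so` entries are Git LFS pointer files, three `key value` lines each; a quick sketch parsing the pointer above:

```python
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:7e47fb27665b884b9862ad0c7b172c518e858894ab76aa850b35456c2aaeefc7
size 11621120"""

meta = dict(line.split(" ", 1) for line in pointer.splitlines())
assert meta["oid"].startswith("sha256:")
assert int(meta["size"]) == 11_621_120  # size in bytes of the real binary
```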
build/torch210-cxx11-cu128-aarch64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/_ops.py, shown above)
build/torch210-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py, shown above)
build/torch210-cxx11-cu128-aarch64-linux/layers.py
DELETED
@@ -1,84 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/layers.py, shown above)
build/torch210-cxx11-cu128-aarch64-linux/metadata.json
DELETED
@@ -1,23 +0,0 @@
{
  "name": "deformable-detr",
  "id": "_deformable_detr_cuda_5129df0",
  "version": 1,
  "license": "Apache-2.0",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "10.1",
      "12.0+PTX",
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu128-x86_64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/__init__.py, shown above)
build/torch210-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_5129df0.abi3.so
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6144f094d925d8d1f187bef56134f21353df4b62e5cf3ba9c6a60a059630400
size 11524560
build/torch210-cxx11-cu128-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/_ops.py, shown above)
build/torch210-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py, shown above)
build/torch210-cxx11-cu128-x86_64-linux/layers.py
DELETED
@@ -1,84 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/layers.py, shown above)
build/torch210-cxx11-cu128-x86_64-linux/metadata.json
DELETED
@@ -1,23 +0,0 @@
(contents identical to build/torch210-cxx11-cu128-aarch64-linux/metadata.json, shown above)
build/torch210-cxx11-cu130-aarch64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/__init__.py, shown above)
build/torch210-cxx11-cu130-aarch64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/_ops.py, shown above)
build/torch210-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py, shown above)
build/torch210-cxx11-cu130-aarch64-linux/layers.py
DELETED
@@ -1,84 +0,0 @@
(contents identical to build/torch210-cu128-x86_64-windows/layers.py, shown above)
build/torch210-cxx11-cu130-aarch64-linux/metadata.json
DELETED
@@ -1,21 +0,0 @@
{
  "name": "deformable-detr",
  "id": "_deformable_detr_cuda_5129df0",
  "version": 1,
  "license": "Apache-2.0",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "11.0",
      "12.0+PTX",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu130-x86_64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
-from typing import List
-import torch
-
-from ._ops import ops
-from . import layers
-
-
-def ms_deform_attn_backward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    grad_output: torch.Tensor,
-    im2col_step: int,
-) -> List[torch.Tensor]:
-    return ops.ms_deform_attn_backward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        grad_output,
-        im2col_step,
-    )
-
-
-def ms_deform_attn_forward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    im2col_step: int,
-) -> torch.Tensor:
-    return ops.ms_deform_attn_forward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        im2col_step,
-    )
-
-
-__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
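
The per-variant __init__.py only re-exports the two raw kernel entry points with typed signatures; the autograd glue lives in layers.py. A sketch of calling the forward op directly for inference, reusing the dummy tensors from the layers.py note above; the deformable_detr import name is an assumption about how the variant ends up on sys.path.

import deformable_detr  # hypothetical import name for one build variant

with torch.no_grad():  # raw op, no autograd wrapper needed for inference
    out = deformable_detr.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_locations, attention_weights, 64,
    )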
build/torch210-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_5129df0.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dc924d4f8fa8b6375abed86185db3e07118fdc9387fde2faa6199c0af810476a
-size 9809000
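
The .so/.pyd entries in this diff are Git LFS pointer files, not binaries: three "key value" lines giving the spec version, the SHA-256 object id, and the byte size of the real artifact. A minimal parsing sketch, not part of the diff:

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:dc924d4f8fa8b6375abed86185db3e07118fdc9387fde2faa6199c0af810476a\n"
    "size 9809000"
)
fields = dict(line.split(" ", 1) for line in pointer.splitlines())
assert fields["oid"].startswith("sha256:") and int(fields["size"]) == 9809000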
build/torch210-cxx11-cu130-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _deformable_detr_cuda_5129df0
-ops = torch.ops._deformable_detr_cuda_5129df0
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_deformable_detr_cuda_5129df0::{op_name}"
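
_ops.py imports the compiled extension, which registers its operators under the hash-suffixed namespace _deformable_detr_cuda_5129df0; torch.ops.<namespace> then resolves them lazily. The hash keeps several builds importable in one process without op-name collisions. A small sketch of what add_op_namespace_prefix is for, useful wherever a fully-qualified op name is required (e.g. torch.library registrations):

qualified = add_op_namespace_prefix("ms_deform_attn_forward")
# -> "_deformable_detr_cuda_5129df0::ms_deform_attn_forward"
assert qualified.split("::")[0] == "_deformable_detr_cuda_5129df0"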
build/torch210-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import importlib.util
-import sys
-from pathlib import Path
-from types import ModuleType
-
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
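
The nested deformable_detr/__init__.py is a re-export shim: it loads the parent package's __init__.py under a module name derived from a hash of its path, then copies that module's globals into its own namespace. Registering under a path-derived name avoids sys.modules collisions when two build variants are loaded side by side, since each path hashes to its own key. A hedged sketch of that property; the directory names and file contents are illustrative only.

from pathlib import Path

for name in ("variant_a", "variant_b"):
    Path(name).mkdir(exist_ok=True)
    (Path(name) / "__init__.py").write_text("WHO = __name__\n")

m1 = _import_from_path(Path("variant_a/__init__.py"))
m2 = _import_from_path(Path("variant_b/__init__.py"))
assert m1 is not m2 and m1.WHO != m2.WHO  # unique, path-keyed module names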
build/torch210-cxx11-cu130-x86_64-linux/layers.py
DELETED
@@ -1,84 +0,0 @@
-from typing import List, Union, Tuple
-
-from torch import Tensor
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class MultiScaleDeformableAttentionFunction(Function):
-    @staticmethod
-    def forward(
-        context,
-        value: Tensor,
-        value_spatial_shapes: Tensor,
-        value_level_start_index: Tensor,
-        sampling_locations: Tensor,
-        attention_weights: Tensor,
-        im2col_step: int,
-    ):
-        context.im2col_step = im2col_step
-        output = ops.ms_deform_attn_forward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            context.im2col_step,
-        )
-        context.save_for_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        )
-        return output
-
-    @staticmethod
-    @once_differentiable
-    def backward(context, grad_output):
-        (
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        ) = context.saved_tensors
-        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            grad_output,
-            context.im2col_step,
-        )
-
-        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
-
-
-class MultiScaleDeformableAttention(nn.Module):
-    def forward(
-        self,
-        value: Tensor,
-        value_spatial_shapes: Tensor,
-        value_spatial_shapes_list: List[Tuple],
-        level_start_index: Tensor,
-        sampling_locations: Tensor,
-        attention_weights: Tensor,
-        im2col_step: int,
-    ):
-        return MultiScaleDeformableAttentionFunction.apply(
-            value,
-            value_spatial_shapes,
-            level_start_index,
-            sampling_locations,
-            attention_weights,
-            im2col_step,
-        )
-
-
-__all__ = ["MultiScaleDeformableAttention"]
build/torch210-cxx11-cu130-x86_64-linux/metadata.json
DELETED
@@ -1,21 +0,0 @@
-{
-  "name": "deformable-detr",
-  "id": "_deformable_detr_cuda_5129df0",
-  "version": 1,
-  "license": "Apache-2.0",
-  "python-depends": [],
-  "backend": {
-    "type": "cuda",
-    "archs": [
-      "10.0",
-      "11.0",
-      "12.0+PTX",
-      "7.5",
-      "8.0",
-      "8.6",
-      "8.7",
-      "8.9",
-      "9.0"
-    ]
-  }
-}
build/torch211-cu128-x86_64-windows/__init__.py
DELETED
@@ -1,46 +0,0 @@
-from typing import List
-import torch
-
-from ._ops import ops
-from . import layers
-
-
-def ms_deform_attn_backward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    grad_output: torch.Tensor,
-    im2col_step: int,
-) -> List[torch.Tensor]:
-    return ops.ms_deform_attn_backward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        grad_output,
-        im2col_step,
-    )
-
-
-def ms_deform_attn_forward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    im2col_step: int,
-) -> torch.Tensor:
-    return ops.ms_deform_attn_forward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        im2col_step,
-    )
-
-
-__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch211-cu128-x86_64-windows/_deformable_detr_cuda_a4c5c25.pyd
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6901592b0cde22932ed33bd9357fdbff9747d916c56fc25ffa455d7bb575bef5
-size 9546240
build/torch211-cu128-x86_64-windows/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _deformable_detr_cuda_a4c5c25
-ops = torch.ops._deformable_detr_cuda_a4c5c25
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_deformable_detr_cuda_a4c5c25::{op_name}"
build/torch211-cu128-x86_64-windows/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import importlib.util
-import sys
-from pathlib import Path
-from types import ModuleType
-
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cu128-x86_64-windows/layers.py
DELETED
@@ -1,84 +0,0 @@
-from typing import List, Union, Tuple
-
-from torch import Tensor
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class MultiScaleDeformableAttentionFunction(Function):
-    @staticmethod
-    def forward(
-        context,
-        value: Tensor,
-        value_spatial_shapes: Tensor,
-        value_level_start_index: Tensor,
-        sampling_locations: Tensor,
-        attention_weights: Tensor,
-        im2col_step: int,
-    ):
-        context.im2col_step = im2col_step
-        output = ops.ms_deform_attn_forward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            context.im2col_step,
-        )
-        context.save_for_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        )
-        return output
-
-    @staticmethod
-    @once_differentiable
-    def backward(context, grad_output):
-        (
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        ) = context.saved_tensors
-        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            grad_output,
-            context.im2col_step,
-        )
-
-        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
-
-
-class MultiScaleDeformableAttention(nn.Module):
-    def forward(
-        self,
-        value: Tensor,
-        value_spatial_shapes: Tensor,
-        value_spatial_shapes_list: List[Tuple],
-        level_start_index: Tensor,
-        sampling_locations: Tensor,
-        attention_weights: Tensor,
-        im2col_step: int,
-    ):
-        return MultiScaleDeformableAttentionFunction.apply(
-            value,
-            value_spatial_shapes,
-            level_start_index,
-            sampling_locations,
-            attention_weights,
-            im2col_step,
-        )
-
-
-__all__ = ["MultiScaleDeformableAttention"]
build/torch211-cu128-x86_64-windows/metadata.json
DELETED
@@ -1,21 +0,0 @@
-{
-  "version": 1,
-  "license": "Apache-2.0",
-  "python-depends": [],
-  "backend": {
-    "type": "cuda",
-    "archs": [
-      "10.0",
-      "10.1",
-      "12.0+PTX",
-      "7.0",
-      "7.2",
-      "7.5",
-      "8.0",
-      "8.6",
-      "8.7",
-      "8.9",
-      "9.0"
-    ]
-  }
-}
build/torch211-cxx11-cu126-aarch64-linux/__init__.py
DELETED
@@ -1,46 +0,0 @@
-from typing import List
-import torch
-
-from ._ops import ops
-from . import layers
-
-
-def ms_deform_attn_backward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    grad_output: torch.Tensor,
-    im2col_step: int,
-) -> List[torch.Tensor]:
-    return ops.ms_deform_attn_backward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        grad_output,
-        im2col_step,
-    )
-
-
-def ms_deform_attn_forward(
-    value: torch.Tensor,
-    spatial_shapes: torch.Tensor,
-    level_start_index: torch.Tensor,
-    sampling_loc: torch.Tensor,
-    attn_weight: torch.Tensor,
-    im2col_step: int,
-) -> torch.Tensor:
-    return ops.ms_deform_attn_forward(
-        value,
-        spatial_shapes,
-        level_start_index,
-        sampling_loc,
-        attn_weight,
-        im2col_step,
-    )
-
-
-__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
build/torch211-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_5129df0.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cb34add17209b87b62eb02c5d67e67cc566779a48bfe31080fa48f59c3102052
-size 8606480
build/torch211-cxx11-cu126-aarch64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _deformable_detr_cuda_5129df0
-ops = torch.ops._deformable_detr_cuda_5129df0
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_deformable_detr_cuda_5129df0::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import importlib.util
-import sys
-from pathlib import Path
-from types import ModuleType
-
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))