# coding=utf-8
# Copyright 2022 The IDEA Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import torch
import torch.nn as nn


class DeformablePositionEmbeddingSine(nn.Module):
    """Sinusoidal position embedding used in Deformable-DETR."""

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, mask):
        assert mask is not None
        not_mask = ~mask
        # Cumulative sums over the valid (non-padded) region yield per-pixel
        # y/x coordinates that ignore padding.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # Deformable-DETR applies a -0.5 offset before normalizing to [0, scale].
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats
        )
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sine on even channels and cosine on odd channels.
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        # (B, H, W, 2 * num_pos_feats) -> (B, 2 * num_pos_feats, H, W)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
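

# Usage sketch (illustrative, not part of the original file): the module takes a
# boolean padding mask of shape (B, H, W), with True marking padded pixels, and
# returns encodings of shape (B, 2 * num_pos_feats, H, W). The helper name and
# sizes below are arbitrary example choices.
def _demo_deformable_sine_embedding():
    pe = DeformablePositionEmbeddingSine(num_pos_feats=128, normalize=True)
    mask = torch.zeros(2, 32, 32, dtype=torch.bool)  # all False: no padding
    pos = pe(mask)
    assert pos.shape == (2, 256, 32, 32)  # channels = 2 * num_pos_feats

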
class DABPositionEmbeddingSine(nn.Module):
    """Sinusoidal position embedding used in DAB-DETR."""

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, mask):
        assert mask is not None
        not_mask = ~mask
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # Unlike the Deformable variant, no -0.5 coordinate offset is applied.
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats
        )
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
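

# Usage sketch (illustrative, not part of the original file): identical call
# signature and output shape as the Deformable variant above; the only behavioral
# difference is the normalization without the -0.5 offset. Sizes are arbitrary.
def _demo_dab_sine_embedding():
    pe = DABPositionEmbeddingSine(num_pos_feats=128, normalize=True)
    mask = torch.zeros(1, 16, 24, dtype=torch.bool)
    pos = pe(mask)
    assert pos.shape == (1, 256, 16, 24)

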
class DABPositionEmbeddingLearned(nn.Module):
    """Learned position embedding used in DAB-DETR."""

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # Fixed-size lookup tables: supports feature maps up to 50 x 50.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, mask):
        # Only the spatial shape and device of the mask are used here.
        h, w = mask.shape[-2:]
        i = torch.arange(w, device=mask.device)
        j = torch.arange(h, device=mask.device)
        x_emb = self.col_embed(i)
        y_emb = self.row_embed(j)
        # Broadcast row/column embeddings over the grid, concatenate along the
        # channel dimension, and repeat for the batch.
        pos = (
            torch.cat(
                [
                    x_emb.unsqueeze(0).repeat(h, 1, 1),
                    y_emb.unsqueeze(1).repeat(1, w, 1),
                ],
                dim=-1,
            )
            .permute(2, 0, 1)
            .unsqueeze(0)
            .repeat(mask.shape[0], 1, 1, 1)
        )
        return pos
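

# Usage sketch (illustrative, not part of the original file): the learned variant
# indexes the fixed 50-entry row/column tables, so it only supports H <= 50 and
# W <= 50. Output is (B, 2 * num_pos_feats, H, W); sizes below are arbitrary.
def _demo_dab_learned_embedding():
    pe = DABPositionEmbeddingLearned(num_pos_feats=256)
    mask = torch.zeros(2, 20, 30, dtype=torch.bool)  # only shape/device matter
    pos = pe(mask)
    assert pos.shape == (2, 512, 20, 30)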