id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
12,906 | import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    """Residual block of two 3x3 convolutions (ResNet-18/34 style).

    The first convolution (and, when given, the ``downsample`` branch)
    carries any spatial stride.  Only groups=1 / base_width=64 and
    dilation=1 are supported by this block type.
    """

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Four stages with [3, 4, 6, 3] BasicBlocks each.
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs) | r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,907 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Four stages with [3, 4, 6, 3] Bottleneck blocks each.
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs) | r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,908 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet101` function. Write a Python function `def resnet101(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Four stages with [3, 4, 23, 3] Bottleneck blocks each.
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs) | r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,909 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet152` function. Write a Python function `def resnet152(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Four stages with [3, 8, 36, 3] Bottleneck blocks each.
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs) | r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,910 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnext50_32x4d` function. Write a Python function `def resnext50_32x4d(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # groups=32 / width_per_group=4 select the 32x4d ResNeXt cardinality.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs) | r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,911 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnext101_32x8d` function. Write a Python function `def resnext101_32x8d(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # groups=32 / width_per_group=8 select the 32x8d ResNeXt cardinality.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs) | r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,912 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `wide_resnet50_2` function. Write a Python function `def wide_resnet50_2(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group widens the inner 3x3 bottleneck convolutions.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs) | r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,913 | import torch
import torch.nn as nn
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (torchvision / V1.5 layout).

    torchvision puts the downsampling stride on the 3x3 convolution
    (self.conv2) instead of the first 1x1 convolution as in the original
    paper "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385).  This variant is also known as
    ResNet V1.5 and improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load pretrained ImageNet weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``block`` and
    ``layers`` configure the network; remaining kwargs go to ``ResNet``.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys in the checkpoint.
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `wide_resnet101_2` function. Write a Python function `def wide_resnet101_2(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group widens the inner 3x3 bottleneck convolutions.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs) | r"""Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
12,914 | import torch
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate(feat, uv)` to solve the following problem:
:param feat: [B, C, H, W] image features :param uv: [B, 2, N] uv coordinates in the image plane, range [-1, 1] :return: [B, C, N] image features at the uv coordinates
Here is the function:
def interpolate(feat, uv):
    '''
    Bilinearly sample image features at normalized uv coordinates.

    :param feat: [B, C, H, W] image features
    :param uv: [B, 2, N] uv coordinates in the image plane, range [-1, 1]
    :return: [B, C, N] image features at the uv coordinates
    '''
    if uv.shape[-1] != 2:
        uv = uv.transpose(1, 2)  # [B, N, 2]
    uv = uv.unsqueeze(2)  # [B, N, 1, 2]
    # NOTE: for newer PyTorch, it seems that training results are degraded due to implementation diff in F.grid_sample
    # for old versions, simply remove the aligned_corners argument.
    # BUGFIX: compare (major, minor) as a tuple. The old minor-only check
    # `int(torch.__version__.split('.')[1]) < 4` misclassified e.g. torch 2.0
    # (minor 0 < 4) as a pre-1.4 release and silently dropped align_corners.
    version = tuple(int(p) for p in torch.__version__.split('.')[:2])
    if version < (1, 4):
        samples = torch.nn.functional.grid_sample(feat, uv)  # [B, C, N, 1]
    else:
        samples = torch.nn.functional.grid_sample(feat, uv, align_corners=True)  # [B, C, N, 1]
    return samples[:, :, :, 0]  # [B, C, N] | :param feat: [B, C, H, W] image features :param uv: [B, 2, N] uv coordinates in the image plane, range [-1, 1] :return: [B, C, N] image features at the uv coordinates |
12,915 | import torch
import torch.nn.functional as F
def _softmax(tensor, temperature, dim=-1):
return F.softmax(tensor * temperature, dim=dim)
def softargmax1d(
    heatmaps,
    temperature=None,
    normalize_keypoints=True,
):
    # Differentiable (soft) argmax over the last axis of 1-D heatmaps.
    # heatmaps: [B, C, D] scores; returns (expected positions [B, C],
    # softmax-normalized heatmaps [B, C, D]).
    dtype, device = heatmaps.dtype, heatmaps.device
    if temperature is None:
        temperature = torch.tensor(1.0, dtype=dtype, device=device)
    batch_size, num_channels, dim = heatmaps.shape
    # Coordinate grid 0..dim-1, broadcast over the batch.
    points = torch.arange(0, dim, device=device, dtype=dtype).reshape(1, 1, dim).expand(batch_size, -1, -1)
    # Leftover from the 2-D version of this function:
    # y = torch.arange(0, height, device=device, dtype=dtype).reshape(1, 1, height, 1).expand(batch_size, -1, -1, width)
    # Should be Bx2xHxW
    # points = torch.cat([x, y], dim=1)
    normalized_heatmap = _softmax(
        heatmaps.reshape(batch_size, num_channels, -1),
        temperature=temperature.reshape(1, -1, 1),
        dim=-1)
    # Expected coordinate under the softmax distribution -- BxJ here.
    keypoints = (normalized_heatmap.reshape(batch_size, -1, dim) * points).sum(dim=-1)
    if normalize_keypoints:
        # Normalize keypoints to [-1, 1]
        keypoints = (keypoints / (dim - 1) * 2 - 1)
    return keypoints, normalized_heatmap.reshape(
        batch_size, -1, dim) | null |
12,916 | import torch
import torch.nn.functional as F
def _softmax(tensor, temperature, dim=-1):
return F.softmax(tensor * temperature, dim=dim)
def softargmax2d(
    heatmaps,
    temperature=None,
    normalize_keypoints=True,
):
    # Differentiable (soft) argmax over 2-D heatmaps.
    # heatmaps: [B, C, H, W]; returns (expected (x, y) positions [B, C, 2],
    # softmax-normalized heatmaps [B, C, H, W]).
    dtype, device = heatmaps.dtype, heatmaps.device
    if temperature is None:
        temperature = torch.tensor(1.0, dtype=dtype, device=device)
    batch_size, num_channels, height, width = heatmaps.shape
    # Pixel-coordinate grids, broadcast to the full heatmap shape.
    x = torch.arange(0, width, device=device, dtype=dtype).reshape(1, 1, 1, width).expand(batch_size, -1, height, -1)
    y = torch.arange(0, height, device=device, dtype=dtype).reshape(1, 1, height, 1).expand(batch_size, -1, -1, width)
    # Should be Bx2xHxW
    points = torch.cat([x, y], dim=1)
    normalized_heatmap = _softmax(
        heatmaps.reshape(batch_size, num_channels, -1),
        temperature=temperature.reshape(1, -1, 1),
        dim=-1)
    # Should be BxJx2 (expected (x, y) under the softmax distribution)
    keypoints = (
        normalized_heatmap.reshape(batch_size, -1, 1, height * width) *
        points.reshape(batch_size, 1, 2, -1)).sum(dim=-1)
    if normalize_keypoints:
        # Normalize keypoints to [-1, 1]
        keypoints[:, :, 0] = (keypoints[:, :, 0] / (width - 1) * 2 - 1)
        keypoints[:, :, 1] = (keypoints[:, :, 1] / (height - 1) * 2 - 1)
    return keypoints, normalized_heatmap.reshape(
        batch_size, -1, height, width) | null |
12,917 | import torch
import torch.nn.functional as F
def _softmax(tensor, temperature, dim=-1):
return F.softmax(tensor * temperature, dim=dim)
def softargmax3d(
    heatmaps,
    temperature=None,
    normalize_keypoints=True,
):
    # Differentiable (soft) argmax over 3-D heatmaps.
    # heatmaps: [B, C, H, W, D]; returns (expected (x, y, z) positions
    # [B, C, 3], softmax-normalized heatmaps [B, C, H, W, D]).
    dtype, device = heatmaps.dtype, heatmaps.device
    if temperature is None:
        temperature = torch.tensor(1.0, dtype=dtype, device=device)
    batch_size, num_channels, height, width, depth = heatmaps.shape
    # Voxel-coordinate grids, broadcast to the full heatmap shape.
    x = torch.arange(0, width, device=device, dtype=dtype).reshape(1, 1, 1, width, 1).expand(batch_size, -1, height, -1, depth)
    y = torch.arange(0, height, device=device, dtype=dtype).reshape(1, 1, height, 1, 1).expand(batch_size, -1, -1, width, depth)
    z = torch.arange(0, depth, device=device, dtype=dtype).reshape(1, 1, 1, 1, depth).expand(batch_size, -1, height, width, -1)
    # Should be Bx3xHxWxD (the original "Bx2xHxW" comment was stale)
    points = torch.cat([x, y, z], dim=1)
    normalized_heatmap = _softmax(
        heatmaps.reshape(batch_size, num_channels, -1),
        temperature=temperature.reshape(1, -1, 1),
        dim=-1)
    # Should be BxJx3 (expected (x, y, z) under the softmax distribution)
    keypoints = (
        normalized_heatmap.reshape(batch_size, -1, 1, height * width * depth) *
        points.reshape(batch_size, 1, 3, -1)).sum(dim=-1)
    if normalize_keypoints:
        # Normalize keypoints to [-1, 1]
        keypoints[:, :, 0] = (keypoints[:, :, 0] / (width - 1) * 2 - 1)
        keypoints[:, :, 1] = (keypoints[:, :, 1] / (height - 1) * 2 - 1)
        keypoints[:, :, 2] = (keypoints[:, :, 2] / (depth - 1) * 2 - 1)
    return keypoints, normalized_heatmap.reshape(
        batch_size, -1, height, width, depth) | null |
12,918 | import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `get_heatmap_preds` function. Write a Python function `def get_heatmap_preds(batch_heatmaps, normalize_keypoints=True)` to solve the following problem:
get predictions from score maps heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Here is the function:
def get_heatmap_preds(batch_heatmaps, normalize_keypoints=True):
    '''
    get predictions from score maps
    batch_heatmaps: torch.Tensor([batch_size, num_joints, height, width])
    (the code calls torch.max / torch.gt, so a torch tensor is expected,
    not the numpy.ndarray the original docstring mentioned)
    '''
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
    batch_size = batch_heatmaps.shape[0]
    num_joints = batch_heatmaps.shape[1]
    height = batch_heatmaps.shape[2]
    width = batch_heatmaps.shape[3]
    # Flatten each heatmap and take the (max value, flat argmax index).
    heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
    maxvals, idx = torch.max(heatmaps_reshaped, 2)
    maxvals = maxvals.reshape((batch_size, num_joints, 1))
    idx = idx.reshape((batch_size, num_joints, 1))
    preds = idx.repeat(1, 1, 2).float()
    # Unravel the flat index: both channels hold the raw index here, so
    # channel 0 becomes the column (idx % width) and channel 1 the row
    # (floor(idx / width)).
    preds[:, :, 0] = (preds[:, :, 0]) % width
    preds[:, :, 1] = torch.floor((preds[:, :, 1]) / width)
    # Zero out keypoints whose peak score is not strictly positive.
    pred_mask = torch.gt(maxvals, 0.0).repeat(1, 1, 2)
    pred_mask = pred_mask.float()
    preds *= pred_mask
    if normalize_keypoints:
        # Normalize keypoints to [-1, 1]
        preds[:, :, 0] = (preds[:, :, 0] / (width - 1) * 2 - 1)
        preds[:, :, 1] = (preds[:, :, 1] / (height - 1) * 2 - 1)
    return preds, maxvals | get predictions from score maps heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) |
12,919 | import torch
import torch.nn as nn
from .. import config
from .smpl_head import SMPL
The provided code snippet includes necessary dependencies for implementing the `perspective_projection` function. Write a Python function `def perspective_projection(points, rotation, translation, cam_intrinsics)` to solve the following problem:
This function computes the perspective projection of a set of points. Input: points (bs, N, 3): 3D points rotation (bs, 3, 3): Camera rotation translation (bs, 3): Camera translation cam_intrinsics (bs, 3, 3): Camera intrinsics
Here is the function:
def perspective_projection(points, rotation, translation, cam_intrinsics):
    """Project 3D points to the image plane with a full perspective camera.

    Input:
        points (bs, N, 3): 3D points
        rotation (bs, 3, 3): Camera rotation
        translation (bs, 3): Camera translation
        cam_intrinsics (bs, 3, 3): Camera intrinsics

    Returns:
        (bs, N, 2) pixel coordinates.
    """
    # Rigid transform into the camera frame.
    cam_points = torch.einsum('bij,bkj->bki', rotation, points) + translation.unsqueeze(1)
    # Perspective divide by depth.
    homogeneous = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    # Apply intrinsics and drop the homogeneous coordinate.
    projected = torch.einsum('bij,bkj->bki', cam_intrinsics, homogeneous.float())
    return projected[:, :, :-1]
12,920 | import torch
import torch.nn as nn
from .. import config
from .smpl_head import SMPL
def convert_pare_to_full_img_cam(
        pare_cam, bbox_height, bbox_center,
        img_w, img_h, focal_length, crop_res=224):
    """Convert PARE's weak-perspective camera (bbox crop coords) to a
    perspective camera translation in full-image coordinates.

    Conversion follows https://arxiv.org/pdf/2009.06549.pdf.

    Args:
        pare_cam (B, 3): weak-perspective camera [s, tx, ty] predicted in the crop.
        bbox_height (B,): bounding-box height in pixels of the original image.
        bbox_center (B, 2): bounding-box center in the original image.
        img_w, img_h: full-image width and height in pixels.
        focal_length: focal length (scalar or per-sample) assumed for the full image.
        crop_res: side length of the square network input crop (default 224).

    Returns:
        (B, 3) camera translation [tx, ty, tz] for a perspective projection.
    """
    s, tx, ty = pare_cam[:, 0], pare_cam[:, 1], pare_cam[:, 2]

    # Fix: honor the crop_res argument instead of a hard-coded 224 so the code
    # is not misleading for other crop resolutions. Note r * crop_res cancels
    # to bbox_height, so tz = 2f / (bbox_height * s) and the default result is
    # unchanged; r is kept for readability of the derivation.
    r = bbox_height / crop_res
    tz = 2 * focal_length / (r * crop_res * s)

    # Shift from bbox-centered to image-centered coordinates (weak-persp units).
    cx = 2 * (bbox_center[:, 0] - (img_w / 2.)) / (s * bbox_height)
    cy = 2 * (bbox_center[:, 1] - (img_h / 2.)) / (s * bbox_height)

    cam_t = torch.stack([tx + cx, ty + cy, tz], dim=-1)
    return cam_t
12,921 | import math
import torch
import numpy as np
import torch.nn as nn
from ..config import SMPL_MEAN_PARAMS
from ..utils.geometry import rot6d_to_rotmat, rotmat_to_rot6d
def keep_variance(x, min_variance):
    """Lower-bound a variance estimate by adding a constant floor."""
    shifted = x + min_variance
    return shifted
12,922 | import os
import time
import yaml
import shutil
import argparse
import operator
import itertools
from os.path import join
from functools import reduce
from yacs.config import CfgNode as CN
from typing import Dict, List, Union, Any
# ---------------------------------------------------------------------------
# Default hyper-parameters (yacs CfgNode). Callers obtain a clone via
# get_hparams_defaults() and overlay experiment-specific values; this
# module-level node itself is never mutated after definition.
# ---------------------------------------------------------------------------
hparams = CN()
# Experiment bookkeeping / logging.
hparams.LOG_DIR = 'logs/experiments'
hparams.METHOD = 'pare'
hparams.EXP_NAME = 'default'
hparams.RUN_TEST = False
hparams.PROJECT_NAME = 'pare'
hparams.SEED_VALUE = -1
# Hardware / cluster settings.
hparams.SYSTEM = CN()
hparams.SYSTEM.GPU = ''
hparams.SYSTEM.CLUSTER_NODE = 0.0
# Dataset selection, augmentation, and dataloader settings.
hparams.DATASET = CN()
hparams.DATASET.LOAD_TYPE = 'Base'
hparams.DATASET.NOISE_FACTOR = 0.4
hparams.DATASET.ROT_FACTOR = 30
hparams.DATASET.SCALE_FACTOR = 0.25
hparams.DATASET.FLIP_PROB = 0.5
hparams.DATASET.CROP_PROB = 0.0
hparams.DATASET.CROP_FACTOR = 0.0
hparams.DATASET.BATCH_SIZE = 64
hparams.DATASET.NUM_WORKERS = 8
hparams.DATASET.PIN_MEMORY = True
hparams.DATASET.SHUFFLE_TRAIN = True
hparams.DATASET.TRAIN_DS = 'all'
hparams.DATASET.VAL_DS = '3dpw_3doh'
hparams.DATASET.NUM_IMAGES = -1
hparams.DATASET.TRAIN_NUM_IMAGES = -1
hparams.DATASET.TEST_NUM_IMAGES = -1
hparams.DATASET.IMG_RES = 224
hparams.DATASET.USE_HEATMAPS = ''
hparams.DATASET.RENDER_RES = 480
hparams.DATASET.MESH_COLOR = 'pinkish'
hparams.DATASET.FOCAL_LENGTH = 5000.
hparams.DATASET.IGNORE_3D = False
hparams.DATASET.USE_SYNTHETIC_OCCLUSION = False
hparams.DATASET.OCC_AUG_DATASET = 'pascal'
hparams.DATASET.USE_3D_CONF = False
hparams.DATASET.USE_GENDER = False
# Encoded as '<ds1>_<ds2>_..._<ratio1>_<ratio2>_...'; parsed elsewhere.
hparams.DATASET.DATASETS_AND_RATIOS = 'h36m_mpii_lspet_coco_mpi-inf-3dhp_0.3_0.6_0.6_0.6_0.1'
hparams.DATASET.STAGE_DATASETS = '0+h36m_coco_0.2_0.8 2+h36m_coco_0.4_0.6'
hparams.DATASET.NONPARAMETRIC = False
# Optimizer settings.
hparams.OPTIMIZER = CN()
hparams.OPTIMIZER.TYPE = 'adam'
hparams.OPTIMIZER.LR = 0.0001
hparams.OPTIMIZER.WD = 0.0
# Training-loop settings.
hparams.TRAINING = CN()
hparams.TRAINING.RESUME = None
hparams.TRAINING.PRETRAINED = None
hparams.TRAINING.PRETRAINED_LIT = None
hparams.TRAINING.MAX_EPOCHS = 100
hparams.TRAINING.LOG_SAVE_INTERVAL = 50
hparams.TRAINING.LOG_FREQ_TB_IMAGES = 500
hparams.TRAINING.CHECK_VAL_EVERY_N_EPOCH = 1
hparams.TRAINING.RELOAD_DATALOADERS_EVERY_EPOCH = True
hparams.TRAINING.NUM_SMPLIFY_ITERS = 100
hparams.TRAINING.RUN_SMPLIFY = False
hparams.TRAINING.SMPLIFY_THRESHOLD = 100
hparams.TRAINING.DROPOUT_P = 0.2
hparams.TRAINING.TEST_BEFORE_TRAINING = False
hparams.TRAINING.SAVE_IMAGES = False
hparams.TRAINING.USE_PART_SEGM_LOSS = False
hparams.TRAINING.USE_AMP = False
# Evaluation / test-time settings.
hparams.TESTING = CN()
hparams.TESTING.SAVE_IMAGES = False
hparams.TESTING.SAVE_FREQ = 1
hparams.TESTING.SAVE_RESULTS = True
hparams.TESTING.SAVE_MESHES = False
hparams.TESTING.SIDEVIEW = True
hparams.TESTING.TEST_ON_TRAIN_END = True
hparams.TESTING.MULTI_SIDEVIEW = False
hparams.TESTING.USE_GT_CAM = False
# PARE model architecture and loss weights.
hparams.PARE = CN()
hparams.PARE.BACKBONE = 'resnet50'
hparams.PARE.NUM_JOINTS = 24
hparams.PARE.SOFTMAX_TEMP = 1.
hparams.PARE.NUM_FEATURES_SMPL = 64
hparams.PARE.USE_ATTENTION = False
hparams.PARE.USE_SELF_ATTENTION = False
hparams.PARE.USE_KEYPOINT_ATTENTION = False
hparams.PARE.USE_KEYPOINT_FEATURES_FOR_SMPL_REGRESSION = False
hparams.PARE.USE_POSTCONV_KEYPOINT_ATTENTION = False
hparams.PARE.KEYPOINT_ATTENTION_ACT = 'softmax'
hparams.PARE.USE_SCALE_KEYPOINT_ATTENTION = False
hparams.PARE.USE_FINAL_NONLOCAL = None
hparams.PARE.USE_BRANCH_NONLOCAL = None
hparams.PARE.USE_HMR_REGRESSION = False
hparams.PARE.USE_COATTENTION = False
hparams.PARE.NUM_COATTENTION_ITER = 1
hparams.PARE.COATTENTION_CONV = 'simple'
hparams.PARE.USE_UPSAMPLING = False
hparams.PARE.DECONV_CONV_KERNEL_SIZE = 4
hparams.PARE.USE_SOFT_ATTENTION = False
hparams.PARE.NUM_BRANCH_ITERATION = 0
hparams.PARE.BRANCH_DEEPER = False
hparams.PARE.NUM_DECONV_LAYERS = 3
hparams.PARE.NUM_DECONV_FILTERS = 256
hparams.PARE.USE_RESNET_CONV_HRNET = False
hparams.PARE.USE_POS_ENC = False
hparams.PARE.ITERATIVE_REGRESSION = False
hparams.PARE.ITER_RESIDUAL = False
hparams.PARE.NUM_ITERATIONS = 3
hparams.PARE.SHAPE_INPUT_TYPE = 'feats.all_pose.shape.cam'
hparams.PARE.POSE_INPUT_TYPE = 'feats.neighbor_pose_feats.all_pose.self_pose.neighbor_pose.shape.cam'
hparams.PARE.POSE_MLP_NUM_LAYERS = 1
hparams.PARE.SHAPE_MLP_NUM_LAYERS = 1
hparams.PARE.POSE_MLP_HIDDEN_SIZE = 256
hparams.PARE.SHAPE_MLP_HIDDEN_SIZE = 256
hparams.PARE.SHAPE_LOSS_WEIGHT = 0
hparams.PARE.KEYPOINT_LOSS_WEIGHT = 5.
hparams.PARE.KEYPOINT_NATIVE_LOSS_WEIGHT = 5.
hparams.PARE.HEATMAPS_LOSS_WEIGHT = 5.
hparams.PARE.SMPL_PART_LOSS_WEIGHT = 1.
hparams.PARE.PART_SEGM_LOSS_WEIGHT = 1.
hparams.PARE.POSE_LOSS_WEIGHT = 1.
hparams.PARE.BETA_LOSS_WEIGHT = 0.001
hparams.PARE.OPENPOSE_TRAIN_WEIGHT = 0.
hparams.PARE.GT_TRAIN_WEIGHT = 1.
hparams.PARE.LOSS_WEIGHT = 60.
hparams.PARE.USE_SHAPE_REG = False
hparams.PARE.USE_MEAN_CAMSHAPE = False
hparams.PARE.USE_MEAN_POSE = False
hparams.PARE.INIT_XAVIER = False
def get_hparams_defaults():
    """Return a clone of the module-level default hyper-parameters.

    Cloning keeps the shared defaults immutable from the caller's side
    (the "local variable" use pattern).
    """
    defaults = hparams.clone()
    return defaults
def update_hparams(hparams_file):
    """Load the defaults and overlay values from a YAML config file."""
    cfg = get_hparams_defaults()
    cfg.merge_from_file(hparams_file)
    return cfg.clone()
12,923 | import os
import time
import yaml
import shutil
import argparse
import operator
import itertools
from os.path import join
from functools import reduce
from yacs.config import CfgNode as CN
from typing import Dict, List, Union, Any
# ---------------------------------------------------------------------------
# Default hyper-parameters (yacs CfgNode). Callers obtain a clone via
# get_hparams_defaults() and overlay experiment-specific values; this
# module-level node itself is never mutated after definition.
# ---------------------------------------------------------------------------
hparams = CN()
# Experiment bookkeeping / logging.
hparams.LOG_DIR = 'logs/experiments'
hparams.METHOD = 'pare'
hparams.EXP_NAME = 'default'
hparams.RUN_TEST = False
hparams.PROJECT_NAME = 'pare'
hparams.SEED_VALUE = -1
# Hardware / cluster settings.
hparams.SYSTEM = CN()
hparams.SYSTEM.GPU = ''
hparams.SYSTEM.CLUSTER_NODE = 0.0
# Dataset selection, augmentation, and dataloader settings.
hparams.DATASET = CN()
hparams.DATASET.LOAD_TYPE = 'Base'
hparams.DATASET.NOISE_FACTOR = 0.4
hparams.DATASET.ROT_FACTOR = 30
hparams.DATASET.SCALE_FACTOR = 0.25
hparams.DATASET.FLIP_PROB = 0.5
hparams.DATASET.CROP_PROB = 0.0
hparams.DATASET.CROP_FACTOR = 0.0
hparams.DATASET.BATCH_SIZE = 64
hparams.DATASET.NUM_WORKERS = 8
hparams.DATASET.PIN_MEMORY = True
hparams.DATASET.SHUFFLE_TRAIN = True
hparams.DATASET.TRAIN_DS = 'all'
hparams.DATASET.VAL_DS = '3dpw_3doh'
hparams.DATASET.NUM_IMAGES = -1
hparams.DATASET.TRAIN_NUM_IMAGES = -1
hparams.DATASET.TEST_NUM_IMAGES = -1
hparams.DATASET.IMG_RES = 224
hparams.DATASET.USE_HEATMAPS = ''
hparams.DATASET.RENDER_RES = 480
hparams.DATASET.MESH_COLOR = 'pinkish'
hparams.DATASET.FOCAL_LENGTH = 5000.
hparams.DATASET.IGNORE_3D = False
hparams.DATASET.USE_SYNTHETIC_OCCLUSION = False
hparams.DATASET.OCC_AUG_DATASET = 'pascal'
hparams.DATASET.USE_3D_CONF = False
hparams.DATASET.USE_GENDER = False
# Encoded as '<ds1>_<ds2>_..._<ratio1>_<ratio2>_...'; parsed elsewhere.
hparams.DATASET.DATASETS_AND_RATIOS = 'h36m_mpii_lspet_coco_mpi-inf-3dhp_0.3_0.6_0.6_0.6_0.1'
hparams.DATASET.STAGE_DATASETS = '0+h36m_coco_0.2_0.8 2+h36m_coco_0.4_0.6'
hparams.DATASET.NONPARAMETRIC = False
# Optimizer settings.
hparams.OPTIMIZER = CN()
hparams.OPTIMIZER.TYPE = 'adam'
hparams.OPTIMIZER.LR = 0.0001
hparams.OPTIMIZER.WD = 0.0
# Training-loop settings.
hparams.TRAINING = CN()
hparams.TRAINING.RESUME = None
hparams.TRAINING.PRETRAINED = None
hparams.TRAINING.PRETRAINED_LIT = None
hparams.TRAINING.MAX_EPOCHS = 100
hparams.TRAINING.LOG_SAVE_INTERVAL = 50
hparams.TRAINING.LOG_FREQ_TB_IMAGES = 500
hparams.TRAINING.CHECK_VAL_EVERY_N_EPOCH = 1
hparams.TRAINING.RELOAD_DATALOADERS_EVERY_EPOCH = True
hparams.TRAINING.NUM_SMPLIFY_ITERS = 100
hparams.TRAINING.RUN_SMPLIFY = False
hparams.TRAINING.SMPLIFY_THRESHOLD = 100
hparams.TRAINING.DROPOUT_P = 0.2
hparams.TRAINING.TEST_BEFORE_TRAINING = False
hparams.TRAINING.SAVE_IMAGES = False
hparams.TRAINING.USE_PART_SEGM_LOSS = False
hparams.TRAINING.USE_AMP = False
# Evaluation / test-time settings.
hparams.TESTING = CN()
hparams.TESTING.SAVE_IMAGES = False
hparams.TESTING.SAVE_FREQ = 1
hparams.TESTING.SAVE_RESULTS = True
hparams.TESTING.SAVE_MESHES = False
hparams.TESTING.SIDEVIEW = True
hparams.TESTING.TEST_ON_TRAIN_END = True
hparams.TESTING.MULTI_SIDEVIEW = False
hparams.TESTING.USE_GT_CAM = False
# PARE model architecture and loss weights.
hparams.PARE = CN()
hparams.PARE.BACKBONE = 'resnet50'
hparams.PARE.NUM_JOINTS = 24
hparams.PARE.SOFTMAX_TEMP = 1.
hparams.PARE.NUM_FEATURES_SMPL = 64
hparams.PARE.USE_ATTENTION = False
hparams.PARE.USE_SELF_ATTENTION = False
hparams.PARE.USE_KEYPOINT_ATTENTION = False
hparams.PARE.USE_KEYPOINT_FEATURES_FOR_SMPL_REGRESSION = False
hparams.PARE.USE_POSTCONV_KEYPOINT_ATTENTION = False
hparams.PARE.KEYPOINT_ATTENTION_ACT = 'softmax'
hparams.PARE.USE_SCALE_KEYPOINT_ATTENTION = False
hparams.PARE.USE_FINAL_NONLOCAL = None
hparams.PARE.USE_BRANCH_NONLOCAL = None
hparams.PARE.USE_HMR_REGRESSION = False
hparams.PARE.USE_COATTENTION = False
hparams.PARE.NUM_COATTENTION_ITER = 1
hparams.PARE.COATTENTION_CONV = 'simple'
hparams.PARE.USE_UPSAMPLING = False
hparams.PARE.DECONV_CONV_KERNEL_SIZE = 4
hparams.PARE.USE_SOFT_ATTENTION = False
hparams.PARE.NUM_BRANCH_ITERATION = 0
hparams.PARE.BRANCH_DEEPER = False
hparams.PARE.NUM_DECONV_LAYERS = 3
hparams.PARE.NUM_DECONV_FILTERS = 256
hparams.PARE.USE_RESNET_CONV_HRNET = False
hparams.PARE.USE_POS_ENC = False
hparams.PARE.ITERATIVE_REGRESSION = False
hparams.PARE.ITER_RESIDUAL = False
hparams.PARE.NUM_ITERATIONS = 3
hparams.PARE.SHAPE_INPUT_TYPE = 'feats.all_pose.shape.cam'
hparams.PARE.POSE_INPUT_TYPE = 'feats.neighbor_pose_feats.all_pose.self_pose.neighbor_pose.shape.cam'
hparams.PARE.POSE_MLP_NUM_LAYERS = 1
hparams.PARE.SHAPE_MLP_NUM_LAYERS = 1
hparams.PARE.POSE_MLP_HIDDEN_SIZE = 256
hparams.PARE.SHAPE_MLP_HIDDEN_SIZE = 256
hparams.PARE.SHAPE_LOSS_WEIGHT = 0
hparams.PARE.KEYPOINT_LOSS_WEIGHT = 5.
hparams.PARE.KEYPOINT_NATIVE_LOSS_WEIGHT = 5.
hparams.PARE.HEATMAPS_LOSS_WEIGHT = 5.
hparams.PARE.SMPL_PART_LOSS_WEIGHT = 1.
hparams.PARE.PART_SEGM_LOSS_WEIGHT = 1.
hparams.PARE.POSE_LOSS_WEIGHT = 1.
hparams.PARE.BETA_LOSS_WEIGHT = 0.001
hparams.PARE.OPENPOSE_TRAIN_WEIGHT = 0.
hparams.PARE.GT_TRAIN_WEIGHT = 1.
hparams.PARE.LOSS_WEIGHT = 60.
hparams.PARE.USE_SHAPE_REG = False
hparams.PARE.USE_MEAN_CAMSHAPE = False
hparams.PARE.USE_MEAN_POSE = False
hparams.PARE.INIT_XAVIER = False
def get_hparams_defaults():
    """Hand back a private clone of the default config node.

    Returning a clone (rather than the module-level node) means callers may
    mutate their copy freely without altering the shared defaults.
    """
    return hparams.clone()
def update_hparams_from_dict(cfg_dict):
    """Load the defaults and overlay values taken from a config dict."""
    defaults = get_hparams_defaults()
    overlay = defaults.load_cfg(str(cfg_dict))
    defaults.merge_from_other_cfg(overlay)
    return defaults.clone()
12,924 | import os
import torch
import torch.nn as nn
from .config import update_hparams
from .head import PareHead
from .backbone.utils import get_backbone_info
from .backbone.hrnet import hrnet_w32
from os.path import join
from easymocap.multistage.torchgeometry import rotation_matrix_to_axis_angle
import cv2
from ..basetopdown import BaseTopDownModelCache
import pickle
def try_to_download():
    """Fetch the PARE model archive from Dropbox and unzip it into models/pare.

    Best-effort: shell commands are not checked for success, matching the
    original behavior.
    """
    model_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'models', 'pare')
    os.system('wget https://www.dropbox.com/s/aeulffqzb3zmh8x/pare-github-data.zip')
    os.makedirs(model_dir, exist_ok=True)
    os.system('unzip pare-github-data.zip -d {}'.format(model_dir))
12,925 | import torch
import numpy as np
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `rot6d_to_rotmat` function. Write a Python function `def rot6d_to_rotmat(x)` to solve the following problem:
Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Input: (B,6) Batch of 6-D rotation representations Output: (B,3,3) Batch of corresponding rotation matrices
Here is the function:
def rot6d_to_rotmat(x):
    """Convert 6D rotation representation to 3x3 rotation matrix.
    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
    Input:
        (B,6) Batch of 6-D rotation representations
    Output:
        (B,3,3) Batch of corresponding rotation matrices
    """
    x = x.reshape(-1, 3, 2)
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: the first basis vector is a1 normalized.
    b1 = F.normalize(a1)
    # Second: remove the b1 component from a2, then normalize.
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    # Fix: pass dim=1 explicitly. Without it, torch.cross picks the *first*
    # dimension of size 3 — the batch dimension when B == 3 — silently
    # producing wrong rotations (and the implicit-dim form is deprecated).
    b3 = torch.cross(b1, b2, dim=1)
    # Stack the orthonormal vectors as matrix columns.
    return torch.stack((b1, b2, b3), dim=-1)
12,926 | import torch
import numpy as np
from torch.nn import functional as F
def rotmat_to_rot6d(x):
    """Flatten the first two columns of each rotation matrix into a 6D vector."""
    mats = x.reshape(-1, 3, 3)
    # Only the first two columns are kept; rot6d_to_rotmat recovers the third.
    six_d = mats[:, :, :2].reshape(x.shape[0], -1)
    return six_d
12,927 | import torch
import numpy as np
from torch.nn import functional as F
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
    """
    This function is borrowed from https://github.com/kornia/kornia
    Convert quaternion vector to angle axis of rotation.
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        quaternion (torch.Tensor): tensor with quaternions.
    Return:
        torch.Tensor: tensor with angle axis of rotation.
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # unpack input and compute conversion
    # Quaternion layout is (w, x, y, z): indices 1..3 are the vector part.
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    # |vector part|^2 == sin^2(theta/2); scalar part == cos(theta/2).
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # atan2 with sign-flipped arguments keeps the recovered angle in a
    # consistent range when the scalar part is negative.
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # Scale factor k = 2*theta / sin(theta); its small-angle limit is 2,
    # so k_neg = 2 is used where sin^2(theta) == 0 to avoid division by zero.
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
    # Axis-angle vector = k * (vector part of the quaternion).
    angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """
    This function is borrowed from https://github.com/kornia/kornia
    Convert 3x4 rotation matrix to 4d quaternion vector
    This algorithm is based on algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.
    Return:
        Tensor: the rotation in quaternion
    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`
    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))
    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    if not rotation_matrix.shape[-2:] == (3, 4):
        raise ValueError(
            "Input size must be a N x 3 x 4 tensor. Got {}".format(
                rotation_matrix.shape))
    # Work on the transpose; rmat_t[:, i, j] is element (j, i) of the input.
    rmat_t = torch.transpose(rotation_matrix, 1, 2)
    # Branch-selection masks based on the diagonal, as in the pyquaternion
    # algorithm linked in the docstring: pick the numerically largest
    # candidate denominator t0..t3 per sample.
    mask_d2 = rmat_t[:, 2, 2] < eps
    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
    # Candidate 0: dominant x component.
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()
    # Candidate 1: dominant y component.
    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()
    # Candidate 2: dominant z component.
    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()
    # Candidate 3: dominant scalar (w) component.
    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()
    # Exactly one of the four masks is true per sample; blend the candidates.
    mask_c0 = mask_d2 * mask_d0_d1
    mask_c1 = mask_d2 * ~mask_d0_d1
    mask_c2 = ~mask_d2 * mask_d0_nd1
    mask_c3 = ~mask_d2 * ~mask_d0_nd1
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)
    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    # Normalize by the selected denominator: q *= 0.5 / sqrt(t_selected).
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
The provided code snippet includes necessary dependencies for implementing the `rotation_matrix_to_angle_axis` function. Write a Python function `def rotation_matrix_to_angle_axis(rotation_matrix)` to solve the following problem:
This function is borrowed from https://github.com/kornia/kornia Convert 3x4 rotation matrix to Rodrigues vector Args: rotation_matrix (Tensor): rotation matrix. Returns: Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 4)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 4) # Nx3x4 >>> output = tgm.rotation_matrix_to_angle_axis(input) # Nx3
Here is the function:
def rotation_matrix_to_angle_axis(rotation_matrix):
    """Convert rotation matrices to axis-angle (Rodrigues) vectors.

    Borrowed from https://github.com/kornia/kornia.

    Args:
        rotation_matrix: (N, 3, 4) tensor — or (N, 3, 3), which is padded.

    Returns:
        (N, 3) Rodrigues vectors.

    Example:
        >>> input = torch.rand(2, 3, 4)  # Nx3x4
        >>> output = rotation_matrix_to_angle_axis(input)  # Nx3
    """
    if rotation_matrix.shape[1:] == (3, 3):
        # Pad 3x3 matrices with a constant column so the 3x4 path applies.
        rot_mat = rotation_matrix.reshape(-1, 3, 3)
        hom = torch.tensor([0, 0, 1], dtype=torch.float32,
                           device=rotation_matrix.device).reshape(1, 3, 1).expand(rot_mat.shape[0], -1, -1)
        rotation_matrix = torch.cat([rot_mat, hom], dim=-1)

    quat = rotation_matrix_to_quaternion(rotation_matrix)
    axis_angle = quaternion_to_angle_axis(quat)
    # Degenerate inputs can produce NaNs in the conversion; zero them out.
    axis_angle[torch.isnan(axis_angle)] = 0.0
    return axis_angle
12,928 | import torch
import numpy as np
from torch.nn import functional as F
def convert_perspective_to_weak_perspective(
        perspective_camera,
        focal_length=5000.,
        img_res=224,
):
    """Convert a perspective camera translation [tx, ty, tz] into the
    weak-perspective parameterization [s, tx, ty] used with a bbox crop.
    """
    # s = 2f / (res * tz); the epsilon guards against division by zero.
    scale = 2 * focal_length / (img_res * perspective_camera[:, 2] + 1e-9)
    weak_perspective_camera = torch.stack(
        [scale, perspective_camera[:, 0], perspective_camera[:, 1]],
        dim=-1,
    )
    return weak_perspective_camera
12,929 | import torch
import numpy as np
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `perspective_projection` function. Write a Python function `def perspective_projection(points, rotation, translation, focal_length, camera_center)` to solve the following problem:
This function computes the perspective projection of a set of points. Input: points (bs, N, 3): 3D points rotation (bs, 3, 3): Camera rotation translation (bs, 3): Camera translation focal_length (bs,) or scalar: Focal length camera_center (bs, 2): Camera center
Here is the function:
def perspective_projection(points, rotation, translation,
                           focal_length, camera_center):
    """Project 3D points with a pinhole camera built from focal length/center.

    Input:
        points (bs, N, 3): 3D points
        rotation (bs, 3, 3): Camera rotation
        translation (bs, 3): Camera translation
        focal_length (bs,) or scalar: Focal length
        camera_center (bs, 2): Camera center

    Returns:
        (bs, N, 2) pixel coordinates.
    """
    batch_size = points.shape[0]

    # Assemble the intrinsic matrix K for each batch element.
    K = torch.zeros([batch_size, 3, 3], device=points.device)
    K[:, 0, 0] = focal_length
    K[:, 1, 1] = focal_length
    K[:, 2, 2] = 1.
    K[:, :-1, -1] = camera_center

    # World -> camera frame, perspective divide, then intrinsics.
    cam_points = torch.einsum('bij,bkj->bki', rotation, points) + translation.unsqueeze(1)
    normalized = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    projected = torch.einsum('bij,bkj->bki', K, normalized)
    return projected[:, :, :-1]
12,930 | import torch
import numpy as np
from torch.nn import functional as F
def convert_weak_perspective_to_perspective(
        weak_perspective_camera,
        focal_length=5000.,
        img_res=224,
):
    """Convert a weak-perspective camera [s, tx, ty] into a perspective
    translation [tx, ty, tz] for the given crop size and focal length.
    The result can be used in a full perspective projection.
    """
    # tz = 2f / (res * s); the epsilon avoids division by zero when s == 0.
    depth = 2 * focal_length / (img_res * weak_perspective_camera[:, 0] + 1e-9)
    perspective_camera = torch.stack(
        [
            weak_perspective_camera[:, 1],
            weak_perspective_camera[:, 2],
            depth,
        ],
        dim=-1,
    )
    return perspective_camera
The provided code snippet includes necessary dependencies for implementing the `weak_perspective_projection` function. Write a Python function `def weak_perspective_projection(points, rotation, weak_cam_params, focal_length, camera_center, img_res)` to solve the following problem:
This function computes the perspective projection of a set of points. Input: points (bs, N, 3): 3D points rotation (bs, 3, 3): Camera rotation translation (bs, 3): Camera translation focal_length (bs,) or scalar: Focal length camera_center (bs, 2): Camera center
Here is the function:
def weak_perspective_projection(points, rotation, weak_cam_params, focal_length, camera_center, img_res):
    """Project 3D points using a weak-perspective camera [s, tx, ty].

    Input:
        points (bs, N, 3): 3D points
        rotation (bs, 3, 3): Camera rotation
        weak_cam_params (bs, 3): weak-perspective camera [s, tx, ty]
        focal_length (bs,) or scalar: Focal length
        camera_center (bs, 2): Camera center
        img_res: crop resolution used to interpret the weak camera scale

    Returns:
        (bs, N, 2) pixel coordinates.
    """
    batch_size = points.shape[0]

    # Per-sample intrinsic matrix.
    K = torch.zeros([batch_size, 3, 3], device=points.device)
    K[:, 0, 0] = focal_length
    K[:, 1, 1] = focal_length
    K[:, 2, 2] = 1.
    K[:, :-1, -1] = camera_center

    # Inlined convert_weak_perspective_to_perspective:
    # [s, tx, ty] -> [tx, ty, 2f / (res * s)].
    translation = torch.stack(
        [
            weak_cam_params[:, 1],
            weak_cam_params[:, 2],
            2 * focal_length / (img_res * weak_cam_params[:, 0] + 1e-9),
        ],
        dim=-1,
    )

    # Rigid transform, perspective divide, then intrinsics.
    cam_points = torch.einsum('bij,bkj->bki', rotation, points) + translation.unsqueeze(1)
    normalized = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    projected = torch.einsum('bij,bkj->bki', K, normalized)
    return projected[:, :, :-1]
12,931 | import torch
import numpy as np
from torch.nn import functional as F
def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000., img_size=224.):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
    Input:
        S: (25, 3) 3D joint locations
        joints: (25, 3) 2D joint locations and confidence
    Returns:
        (3,) camera translation vector
    """
    num_joints = S.shape[0]
    f = np.array([focal_length, focal_length])          # per-axis focal length
    center = np.array([img_size / 2., img_size / 2.])   # principal point

    # Flatten per-joint quantities into interleaved (x, y) vectors.
    depths = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)
    xy = np.reshape(S[:, 0:2], -1)
    centers = np.tile(center, num_joints)
    focals = np.tile(f, num_joints)
    weights = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)

    # Linear system design @ t = rhs derived from the projection equations.
    design = np.array([focals * np.tile(np.array([1, 0]), num_joints),
                       focals * np.tile(np.array([0, 1]), num_joints),
                       centers - np.reshape(joints_2d, -1)]).T
    rhs = (np.reshape(joints_2d, -1) - centers) * depths - focals * xy

    # Confidence-weighted least squares via the normal equations.
    W = np.diagflat(weights)
    design = np.dot(W, design)
    rhs = np.dot(W, rhs)
    trans = np.linalg.solve(np.dot(design.T, design), np.dot(design.T, rhs))
    return trans
The provided code snippet includes necessary dependencies for implementing the `estimate_translation` function. Write a Python function `def estimate_translation(S, joints_2d, focal_length=5000., img_size=224., use_all_joints=False, rotation=None)` to solve the following problem:
Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. Input: S: (B, 49, 3) 3D joint locations joints: (B, 49, 3) 2D joint locations and confidence Returns: (B, 3) camera translation vectors
Here is the function:
def estimate_translation(S, joints_2d, focal_length=5000., img_size=224., use_all_joints=False, rotation=None):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
    Input:
        S: (B, 49, 3) 3D joint locations
        joints: (B, 49, 3) 2D joint locations and confidence
    Returns:
        (B, 3) camera translation vectors
    """
    device = S.device
    if rotation is not None:
        S = torch.einsum('bij,bkj->bki', rotation, S)

    # By default only the ground-truth joints (indices 25:49) are used.
    if use_all_joints:
        S_np = S.cpu().numpy()
        joints_np = joints_2d.cpu().numpy()
    else:
        S_np = S[:, 25:, :].cpu().numpy()
        joints_np = joints_2d[:, 25:, :].cpu().numpy()

    # Last channel of the 2D joints carries the confidence.
    conf = joints_np[:, :, -1]
    joints_np = joints_np[:, :, :-1]

    # Solve the least-squares problem sample by sample.
    trans = np.zeros((S_np.shape[0], 3), dtype=np.float32)
    for i in range(S_np.shape[0]):
        trans[i] = estimate_translation_np(
            S_np[i], joints_np[i], conf[i],
            focal_length=focal_length, img_size=img_size)
    return torch.from_numpy(trans).to(device)
12,932 | import torch
import numpy as np
from torch.nn import functional as F
def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000., img_size=224.):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
    Input:
        S: (25, 3) 3D joint locations
        joints: (25, 3) 2D joint locations and confidence
    Returns:
        (3,) camera translation vector
    """
    n = S.shape[0]
    focal = np.array([focal_length, focal_length])
    principal = np.array([img_size / 2., img_size / 2.])

    # Interleave per-joint x/y quantities into flat vectors of length 2n.
    z_flat = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)
    xy_flat = np.reshape(S[:, 0:2], -1)
    c_flat = np.tile(principal, n)
    f_flat = np.tile(focal, n)
    w_flat = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)

    # Projection equations rearranged into A_mat @ t = b_vec.
    A_mat = np.array([f_flat * np.tile(np.array([1, 0]), n),
                      f_flat * np.tile(np.array([0, 1]), n),
                      c_flat - np.reshape(joints_2d, -1)]).T
    b_vec = (np.reshape(joints_2d, -1) - c_flat) * z_flat - f_flat * xy_flat

    # Weight by sqrt(confidence), then solve the normal equations.
    W = np.diagflat(w_flat)
    A_mat = np.dot(W, A_mat)
    b_vec = np.dot(W, b_vec)
    return np.linalg.solve(np.dot(A_mat.T, A_mat), np.dot(A_mat.T, b_vec))
The provided code snippet includes necessary dependencies for implementing the `estimate_translation_cam` function. Write a Python function `def estimate_translation_cam(S, joints_2d, focal_length=(5000., 5000.), img_size=(224., 224.), use_all_joints=False, rotation=None)` to solve the following problem:
Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. Input: S: (B, 49, 3) 3D joint locations joints: (B, 49, 3) 2D joint locations and confidence Returns: (B, 3) camera translation vectors
Here is the function:
def estimate_translation_cam(S, joints_2d, focal_length=(5000., 5000.), img_size=(224., 224.),
                             use_all_joints=False, rotation=None):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.

    Variant with per-axis focal length and non-square image size.

    Input:
        S: (B, 49, 3) 3D joint locations
        joints: (B, 49, 3) 2D joint locations and confidence
    Returns:
        (B, 3) camera translation vectors
    """
    device = S.device
    if rotation is not None:
        S = torch.einsum('bij,bkj->bki', rotation, S)

    # By default only the ground-truth joints (indices 25:49) are used.
    if use_all_joints:
        S_np = S.cpu().numpy()
        joints_np = joints_2d.cpu().numpy()
    else:
        S_np = S[:, 25:, :].cpu().numpy()
        joints_np = joints_2d[:, 25:, :].cpu().numpy()

    conf = joints_np[:, :, -1]
    joints_np = joints_np[:, :, :-1]

    trans = np.zeros((S_np.shape[0], 3), dtype=np.float32)
    for i in range(S_np.shape[0]):
        # Per-sample confidence-weighted least squares (inlined helper).
        joints3d, joints2d_i, conf_i = S_np[i], joints_np[i], conf[i]
        num_joints = joints3d.shape[0]
        f = np.array([focal_length[0], focal_length[1]])
        center = np.array([img_size[0] / 2., img_size[1] / 2.])

        depths = np.reshape(np.tile(joints3d[:, 2], (2, 1)).T, -1)
        xy = np.reshape(joints3d[:, 0:2], -1)
        centers = np.tile(center, num_joints)
        focals = np.tile(f, num_joints)
        weights = np.reshape(np.tile(np.sqrt(conf_i), (2, 1)).T, -1)

        design = np.array([focals * np.tile(np.array([1, 0]), num_joints),
                           focals * np.tile(np.array([0, 1]), num_joints),
                           centers - np.reshape(joints2d_i, -1)]).T
        rhs = (np.reshape(joints2d_i, -1) - centers) * depths - focals * xy

        W = np.diagflat(weights)
        design = np.dot(W, design)
        rhs = np.dot(W, rhs)
        trans[i] = np.linalg.solve(np.dot(design.T, design), np.dot(design.T, rhs))
    return torch.from_numpy(trans).to(device)
12,933 | import torch
import numpy as np
from torch.nn import functional as F
def get_coord_maps(size=56):
    """Build a CoordConv-style coordinate map.

    Returns a (1, 2, size, size) float tensor where channel 0 holds the x
    (width) coordinate and channel 1 the y (height) coordinate, each
    normalized to [-1, 1].
    """
    # x channel: each row equals 0..size-1 (varies along the width axis)
    xx_ones = torch.ones([1, size], dtype=torch.int32)
    xx_ones = xx_ones.unsqueeze(-1)
    xx_range = torch.arange(size, dtype=torch.int32).unsqueeze(0)
    xx_range = xx_range.unsqueeze(1)
    xx_channel = torch.matmul(xx_ones, xx_range)
    xx_channel = xx_channel.unsqueeze(-1)
    # y channel: each column equals 0..size-1 (varies along the height axis)
    yy_ones = torch.ones([1, size], dtype=torch.int32)
    yy_ones = yy_ones.unsqueeze(1)
    yy_range = torch.arange(size, dtype=torch.int32).unsqueeze(0)
    yy_range = yy_range.unsqueeze(-1)
    yy_channel = torch.matmul(yy_range, yy_ones)
    yy_channel = yy_channel.unsqueeze(-1)
    # NHWC -> NCHW
    xx_channel = xx_channel.permute(0, 3, 1, 2)
    yy_channel = yy_channel.permute(0, 3, 1, 2)
    # normalize from [0, size-1] to [-1, 1]
    xx_channel = xx_channel.float() / (size - 1)
    yy_channel = yy_channel.float() / (size - 1)
    xx_channel = xx_channel * 2 - 1
    yy_channel = yy_channel * 2 - 1
    out = torch.cat([xx_channel, yy_channel], dim=1)
    return out | null |
12,934 | import torch
import numpy as np
from torch.nn import functional as F
def look_at(eye, at=np.array([0, 0, 0]), up=np.array([0, 0, 1]), eps=1e-5):
    """Build camera rotation matrices that look from `eye` toward `at`.

    Args:
        eye: (N, 3) or (3,) camera position(s).
        at: (3,) target point.
        up: (3,) world up direction.
        eps: floor applied to axis norms to avoid division by zero.
    Returns:
        (N, 3, 3) rotation matrices whose columns are (x_axis, y_axis, z_axis).
    """
    at = at.astype(float).reshape(1, 3)
    up = up.astype(float).reshape(1, 3)
    eye = eye.reshape(-1, 3)
    up = up.repeat(eye.shape[0] // up.shape[0], axis=0)
    eps = np.array([eps]).reshape(1, 1).repeat(up.shape[0], axis=0)
    # NOTE(review): each axis below is divided by the max over (all per-row
    # norms, eps) — a single shared scalar, not a per-row normalization.
    # Presumably only exercised with a single eye position; verify callers.
    z_axis = eye - at
    z_axis /= np.max(np.stack([np.linalg.norm(z_axis, axis=1, keepdims=True), eps]))
    x_axis = np.cross(up, z_axis)
    x_axis /= np.max(np.stack([np.linalg.norm(x_axis, axis=1, keepdims=True), eps]))
    y_axis = np.cross(z_axis, x_axis)
    y_axis /= np.max(np.stack([np.linalg.norm(y_axis, axis=1, keepdims=True), eps]))
    r_mat = np.concatenate((x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(-1, 3, 1)), axis=2)
    return r_mat
def sample_on_sphere(range_u=(0, 1), range_v=(0, 1)):
    """Draw a random point on the unit sphere via uniform (u, v) coordinates."""
    u, v = np.random.uniform(*range_u), np.random.uniform(*range_v)
    return to_sphere(u, v)
def sample_pose_on_sphere(range_v=(0,1), range_u=(0,1), radius=1, up=[0,1,0]):
    """Sample a random camera pose on a sphere, looking at the origin.

    radius may be a scalar or a (lo, hi) tuple to sample from.
    Returns a (3, 4) [R|t] float32 torch tensor.
    NOTE(review): mutable default `up` is shared across calls; it is not
    mutated here, but a tuple would be safer.
    """
    # sample location on unit sphere
    loc = sample_on_sphere(range_u, range_v)
    # sample radius if necessary
    if isinstance(radius, tuple):
        radius = np.random.uniform(*radius)
    loc = loc * radius
    R = look_at(loc, up=np.array(up))[0]
    RT = np.concatenate([R, loc.reshape(3, 1)], axis=1)
    RT = torch.Tensor(RT.astype(np.float32))
    return RT | null |
12,935 | import torch
import numpy as np
from torch.nn import functional as F
def batch_rot2aa(Rs):
def batch_rodrigues(theta):
def rectify_pose(camera_r, body_aa, rotate_x=False):
    """Compose a camera rotation with SMPL global orientation(s).

    Args:
        camera_r: rotation matrix (broadcastable to (N, 3, 3)) to apply.
        body_aa: (N, 3) axis-angle global orientations.
        rotate_x: when True, first rotate the pose 180 degrees about x.
    Returns:
        (N, 3) rectified axis-angle rotations.
    """
    body_r = batch_rodrigues(body_aa).reshape(-1,3,3)
    if rotate_x:
        # 180-degree rotation around the x axis (negates the y and z axes)
        rotate_x = torch.tensor([[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]])
        body_r = body_r @ rotate_x
    final_r = camera_r @ body_r
    body_aa = batch_rot2aa(final_r)
    return body_aa | null |
12,936 | import torch
import numpy as np
from torch.nn import functional as F
def euler_to_quaternion(r):
    """Convert Euler angles to quaternions.

    Args:
        r: (..., 3) tensor of Euler angles (x, y, z) in radians.

    Returns:
        (..., 4) tensor of quaternions in (w, x, y, z) order, on the same
        device/dtype as `r`.
    """
    half = r / 2.0
    x = half[..., 0]
    y = half[..., 1]
    z = half[..., 2]
    cz = torch.cos(z)
    sz = torch.sin(z)
    cy = torch.cos(y)
    sy = torch.sin(y)
    cx = torch.cos(x)
    sx = torch.sin(x)
    # Same component formulas as before; torch.stack replaces the original
    # zeros_like(r.repeat(1, 2))[..., :4] allocation trick, which only worked
    # for 2-D input — this generalizes to arbitrary leading dimensions.
    w = cx * cy * cz - sx * sy * sz
    qx = cx * sy * sz + cy * cz * sx
    qy = cx * cz * sy - sx * cy * sz
    qz = cx * cy * sz + sx * cz * sy
    return torch.stack([w, qx, qy, qz], dim=-1)
def quaternion_to_rotation_matrix(quat):
    """Convert a batch of quaternions to rotation matrices.

    Args:
        quat: (B, 4) quaternions in (w, x, y, z) order; normalized internally.
    Returns:
        (B, 3, 3) rotation matrices.
    """
    q = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = q.unbind(dim=1)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Standard quaternion-to-matrix formula, assembled row by row.
    row0 = torch.stack([ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz], dim=1)
    row1 = torch.stack([2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx], dim=1)
    row2 = torch.stack([2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz], dim=1)
    return torch.stack([row0, row1, row2], dim=1)
def batch_euler2matrix(r):
    # Euler angles (B, 3) -> rotation matrices (B, 3, 3), via quaternions.
    return quaternion_to_rotation_matrix(euler_to_quaternion(r)) | null |
12,937 | import torch
import numpy as np
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `euler_angles_from_rotmat` function. Write a Python function `def euler_angles_from_rotmat(R)` to solve the following problem:
compute Euler angles for rotation around the x, y, z axes from a rotation matrix R: 4x4 rotation matrix https://www.gregslabaugh.net/publications/euler.pdf
Here is the function:
def euler_angles_from_rotmat(R):
    """
    compute Euler angles for rotation around the x, y, z axes
    from a rotation matrix
    R: 4x4 rotation matrix
    https://www.gregslabaugh.net/publications/euler.pdf
    """
    # NOTE(review): relies on math.pi — confirm `math` is imported at module level.
    # assumes a batch of exactly one matrix (uses .item()) — TODO confirm
    r21 = np.round(R[:, 2, 0].item(), 4)
    if abs(r21) != 1:
        # Generic case: two valid Euler decompositions exist; return both.
        y_angle1 = -1 * torch.asin(R[:, 2, 0])
        y_angle2 = math.pi + torch.asin(R[:, 2, 0])
        cy1, cy2 = torch.cos(y_angle1), torch.cos(y_angle2)
        x_angle1 = torch.atan2(R[:, 2, 1] / cy1, R[:, 2, 2] / cy1)
        x_angle2 = torch.atan2(R[:, 2, 1] / cy2, R[:, 2, 2] / cy2)
        z_angle1 = torch.atan2(R[:, 1, 0] / cy1, R[:, 0, 0] / cy1)
        z_angle2 = torch.atan2(R[:, 1, 0] / cy2, R[:, 0, 0] / cy2)
        s1 = (x_angle1, y_angle1, z_angle1)
        s2 = (x_angle2, y_angle2, z_angle2)
        s = (s1, s2)
    else:
        # Gimbal lock (|R[2,0]| == 1): z is unconstrained, so it is fixed to 0.
        z_angle = torch.tensor([0], device=R.device).float()
        if r21 == -1:
            y_angle = torch.tensor([math.pi / 2], device=R.device).float()
            x_angle = z_angle + torch.atan2(R[:, 0, 1], R[:, 0, 2])
        else:
            y_angle = -torch.tensor([math.pi / 2], device=R.device).float()
            x_angle = -z_angle + torch.atan2(-R[:, 0, 1], R[:, 0, 2])
        s = ((x_angle, y_angle, z_angle),)
    return s | computer euler angles for rotation around x, y, z axis from rotation amtrix R: 4x4 rotation matrix https://www.gregslabaugh.net/publications/euler.pdf |
12,938 | import numpy as np
def keypoint_hflip(kp, img_width):
    """Flip keypoints horizontally around the image's vertical center line.

    kp: (N, D) or (T, N, D) array with x in the first channel; mutated in
    place and also returned. Other ranks are returned unchanged.
    """
    if len(kp.shape) == 2:
        kp[:,0] = (img_width - 1.) - kp[:,0]
    elif len(kp.shape) == 3:
        kp[:, :, 0] = (img_width - 1.) - kp[:, :, 0]
    return kp | null |
12,939 | import numpy as np
def convert_kps(joints2d, src, dst):
    """Convert keypoints between dataset layouts by matching joint names.

    Args:
        joints2d: (T, J_src, C) keypoints in the `src` layout.
        src, dst: layout names resolved to get_<name>_joint_names() helpers.
    Returns:
        (T, J_dst, C) array; joints absent from `src` remain zero.
    """
    # NOTE(review): eval() on the layout names — fine for internal constants,
    # but never call this with untrusted `src`/`dst` strings.
    src_names = eval(f'get_{src}_joint_names')()
    dst_names = eval(f'get_{dst}_joint_names')()
    out_joints2d = np.zeros((joints2d.shape[0], len(dst_names), joints2d.shape[-1]))
    for idx, jn in enumerate(dst_names):
        if jn in src_names:
            out_joints2d[:, idx] = joints2d[:, src_names.index(jn)]
    return out_joints2d | null |
12,940 | import numpy as np
def get_perm_idxs(src, dst):
    """Indices that permute `src`-layout joints into `dst` order (shared joints only)."""
    # NOTE(review): eval() on the layout names — do not pass untrusted strings.
    src_names = eval(f'get_{src}_joint_names')()
    dst_names = eval(f'get_{dst}_joint_names')()
    idxs = [src_names.index(h) for h in dst_names if h in src_names]
    return idxs | null |
12,941 | import numpy as np
def get_mpii3d_test_joint_names():
    """Joint names (17) for the MPI-INF-3DHP test protocol, in dataset order."""
    return [
        'headtop', # 'head_top',
        'neck',
        'rshoulder',# 'right_shoulder',
        'relbow',# 'right_elbow',
        'rwrist',# 'right_wrist',
        'lshoulder',# 'left_shoulder',
        'lelbow', # 'left_elbow',
        'lwrist', # 'left_wrist',
        'rhip', # 'right_hip',
        'rknee', # 'right_knee',
        'rankle',# 'right_ankle',
        'lhip',# 'left_hip',
        'lknee',# 'left_knee',
        'lankle',# 'left_ankle'
        'hip',# 'pelvis',
        'Spine (H36M)',# 'spine',
        'Head (H36M)',# 'head'
    ] | null |
12,942 | import numpy as np
def get_mpii3d_joint_names():
    """Joint names (28) for the MPI-INF-3DHP training annotation, in dataset order."""
    return [
        'spine3', # 0,
        'spine4', # 1,
        'spine2', # 2,
        'Spine (H36M)', #'spine', # 3,
        'hip', # 'pelvis', # 4,
        'neck', # 5,
        'Head (H36M)', # 'head', # 6,
        "headtop", # 'head_top', # 7,
        'left_clavicle', # 8,
        "lshoulder", # 'left_shoulder', # 9,
        "lelbow", # 'left_elbow',# 10,
        "lwrist", # 'left_wrist',# 11,
        'left_hand',# 12,
        'right_clavicle',# 13,
        'rshoulder',# 'right_shoulder',# 14,
        'relbow',# 'right_elbow',# 15,
        'rwrist',# 'right_wrist',# 16,
        'right_hand',# 17,
        'lhip', # left_hip',# 18,
        'lknee', # 'left_knee',# 19,
        'lankle', #left ankle # 20
        'left_foot', # 21
        'left_toe', # 22
        "rhip", # 'right_hip',# 23
        "rknee", # 'right_knee',# 24
        "rankle", #'right_ankle', # 25
        'right_foot',# 26
        'right_toe' # 27
    ] | null |
12,943 | import numpy as np
def get_insta_joint_names():
    """Joint names (25) for the InstaVariety dataset, in dataset order."""
    return [
        'OP RHeel',
        'OP RKnee',
        'OP RHip',
        'OP LHip',
        'OP LKnee',
        'OP LHeel',
        'OP RWrist',
        'OP RElbow',
        'OP RShoulder',
        'OP LShoulder',
        'OP LElbow',
        'OP LWrist',
        'OP Neck',
        'headtop',
        'OP Nose',
        'OP LEye',
        'OP REye',
        'OP LEar',
        'OP REar',
        'OP LBigToe',
        'OP RBigToe',
        'OP LSmallToe',
        'OP RSmallToe',
        'OP LAnkle',
        'OP RAnkle',
    ] | null |
12,944 | import numpy as np
def get_mmpose_joint_names():
    """Joint names for the first 23 MMPose keypoints (no hands or face)."""
    # this naming is for the first 23 joints of MMPose
    # does not include hands and face
    return [
        'OP Nose', # 1
        'OP LEye', # 2
        'OP REye', # 3
        'OP LEar', # 4
        'OP REar', # 5
        'OP LShoulder', # 6
        'OP RShoulder', # 7
        'OP LElbow', # 8
        'OP RElbow', # 9
        'OP LWrist', # 10
        'OP RWrist', # 11
        'OP LHip', # 12
        'OP RHip', # 13
        'OP LKnee', # 14
        'OP RKnee', # 15
        'OP LAnkle', # 16
        'OP RAnkle', # 17
        'OP LBigToe', # 18
        'OP LSmallToe', # 19
        'OP LHeel', # 20
        'OP RBigToe', # 21
        'OP RSmallToe', # 22
        'OP RHeel', # 23
    ] | null |
12,945 | import numpy as np
def get_insta_skeleton():
    """Bone connectivity (pairs of joint indices) for the InstaVariety layout."""
    return np.array(
        [
            [0 , 1],
            [1 , 2],
            [2 , 3],
            [3 , 4],
            [4 , 5],
            [6 , 7],
            [7 , 8],
            [8 , 9],
            [9 ,10],
            [2 , 8],
            [3 , 9],
            [10,11],
            [8 ,12],
            [9 ,12],
            [12,13],
            [12,14],
            [14,15],
            [14,16],
            [15,17],
            [16,18],
            [0 ,20],
            [20,22],
            [5 ,19],
            [19,21],
            [5 ,23],
            [0 ,24],
        ]) | null |
12,946 | import numpy as np
def get_staf_skeleton():
    """Bone connectivity (pairs of joint indices) for the STAF keypoint layout."""
    return np.array(
        [
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4],
            [1, 5],
            [5, 6],
            [6, 7],
            [1, 8],
            [8, 9],
            [9, 10],
            [10, 11],
            [8, 12],
            [12, 13],
            [13, 14],
            [0, 15],
            [0, 16],
            [15, 17],
            [16, 18],
            [2, 9],
            [5, 12],
            [1, 19],
            [20, 19],
        ]
    ) | null |
12,947 | import numpy as np
def get_staf_joint_names():
    """Joint names (21) for the STAF tracker output, in dataset order."""
    return [
        'OP Nose', # 0,
        'OP Neck', # 1,
        'OP RShoulder', # 2,
        'OP RElbow', # 3,
        'OP RWrist', # 4,
        'OP LShoulder', # 5,
        'OP LElbow', # 6,
        'OP LWrist', # 7,
        'OP MidHip', # 8,
        'OP RHip', # 9,
        'OP RKnee', # 10,
        'OP RAnkle', # 11,
        'OP LHip', # 12,
        'OP LKnee', # 13,
        'OP LAnkle', # 14,
        'OP REye', # 15,
        'OP LEye', # 16,
        'OP REar', # 17,
        'OP LEar', # 18,
        'Neck (LSP)', # 19,
        'Top of Head (LSP)', # 20,
    ] | null |
12,948 | import numpy as np
def get_spin_op_joint_names():
    """OpenPose-style names for the first 25 SPIN joints (BODY_25 layout)."""
    return [
        'OP Nose',        # 0
        'OP Neck',        # 1
        'OP RShoulder',   # 2
        'OP RElbow',      # 3
        'OP RWrist',      # 4
        'OP LShoulder',   # 5
        'OP LElbow',      # 6
        'OP LWrist',      # 7
        'OP MidHip',      # 8
        'OP RHip',        # 9
        'OP RKnee',       # 10
        'OP RAnkle',      # 11
        'OP LHip',        # 12
        'OP LKnee',       # 13
        'OP LAnkle',      # 14
        'OP REye',        # 15
        'OP LEye',        # 16
        'OP REar',        # 17
        'OP LEar',        # 18
        'OP LBigToe',     # 19
        'OP LSmallToe',   # 20
        'OP LHeel',       # 21
        'OP RBigToe',     # 22
        'OP RSmallToe',   # 23
        'OP RHeel',       # 24
    ] | null |
12,949 | import numpy as np
def get_openpose_joint_names():
    """Joint names (25) of the OpenPose BODY_25 layout, in dataset order."""
    return [
        'OP Nose',        # 0
        'OP Neck',        # 1
        'OP RShoulder',   # 2
        'OP RElbow',      # 3
        'OP RWrist',      # 4
        'OP LShoulder',   # 5
        'OP LElbow',      # 6
        'OP LWrist',      # 7
        'OP MidHip',      # 8
        'OP RHip',        # 9
        'OP RKnee',       # 10
        'OP RAnkle',      # 11
        'OP LHip',        # 12
        'OP LKnee',       # 13
        'OP LAnkle',      # 14
        'OP REye',        # 15
        'OP LEye',        # 16
        'OP REar',        # 17
        'OP LEar',        # 18
        'OP LBigToe',     # 19
        'OP LSmallToe',   # 20
        'OP LHeel',       # 21
        'OP RBigToe',     # 22
        'OP RSmallToe',   # 23
        'OP RHeel',       # 24
    ] | null |
12,950 | import numpy as np
def get_spin_joint_names():
    """Joint names (49) used by SPIN: 25 OpenPose joints followed by 24 extras."""
    return [
        'OP Nose',        # 0
        'OP Neck',        # 1
        'OP RShoulder',   # 2
        'OP RElbow',      # 3
        'OP RWrist',      # 4
        'OP LShoulder',   # 5
        'OP LElbow',      # 6
        'OP LWrist',      # 7
        'OP MidHip',      # 8
        'OP RHip',        # 9
        'OP RKnee',       # 10
        'OP RAnkle',      # 11
        'OP LHip',        # 12
        'OP LKnee',       # 13
        'OP LAnkle',      # 14
        'OP REye',        # 15
        'OP LEye',        # 16
        'OP REar',        # 17
        'OP LEar',        # 18
        'OP LBigToe',     # 19
        'OP LSmallToe',   # 20
        'OP LHeel',       # 21
        'OP RBigToe',     # 22
        'OP RSmallToe',   # 23
        'OP RHeel',       # 24
        'rankle',         # 25
        'rknee',          # 26
        'rhip',           # 27
        'lhip',           # 28
        'lknee',          # 29
        'lankle',         # 30
        'rwrist',         # 31
        'relbow',         # 32
        'rshoulder',      # 33
        'lshoulder',      # 34
        'lelbow',         # 35
        'lwrist',         # 36
        'neck',           # 37
        'headtop',        # 38
        'hip',            # 39 'Pelvis (MPII)', # 39
        'thorax',         # 40 'Thorax (MPII)', # 40
        'Spine (H36M)',   # 41
        'Jaw (H36M)',     # 42
        'Head (H36M)',    # 43
        'nose',           # 44
        'leye',           # 45 'Left Eye', # 45
        'reye',           # 46 'Right Eye', # 46
        'lear',           # 47 'Left Ear', # 47
        'rear',           # 48 'Right Ear', # 48
    ] | null |
12,951 | import numpy as np
def get_muco3dhp_joint_names():
    """Joint names (21) for the MuCo-3DHP dataset, in dataset order."""
    return [
        'headtop',
        'thorax',
        'rshoulder',
        'relbow',
        'rwrist',
        'lshoulder',
        'lelbow',
        'lwrist',
        'rhip',
        'rknee',
        'rankle',
        'lhip',
        'lknee',
        'lankle',
        'hip',
        'Spine (H36M)',
        'Head (H36M)',
        'R_Hand',
        'L_Hand',
        'R_Toe',
        'L_Toe'
    ] | null |
12,952 | import numpy as np
def get_h36m_joint_names():
    """Joint names (17) for the Human3.6M skeleton, in dataset order."""
    return [
        'hip',  # 0
        'lhip',  # 1
        'lknee',  # 2
        'lankle',  # 3
        'rhip',  # 4
        'rknee',  # 5
        'rankle',  # 6
        'Spine (H36M)',  # 7
        'neck',  # 8
        'Head (H36M)',  # 9
        'headtop',  # 10
        'lshoulder',  # 11
        'lelbow',  # 12
        'lwrist',  # 13
        'rshoulder',  # 14
        'relbow',  # 15
        'rwrist',  # 16
    ] | null |
12,953 | import numpy as np
def get_spin_skeleton():
    """Bone connectivity (pairs of joint indices) for the 49-joint SPIN layout."""
    return np.array(
        [
            [0 , 1],
            [1 , 2],
            [2 , 3],
            [3 , 4],
            [1 , 5],
            [5 , 6],
            [6 , 7],
            [1 , 8],
            [8 , 9],
            [9 ,10],
            [10,11],
            [8 ,12],
            [12,13],
            [13,14],
            [0 ,15],
            [0 ,16],
            [15,17],
            [16,18],
            [21,19],
            [19,20],
            [14,21],
            [11,24],
            [24,22],
            [22,23],
            [0 ,38],
        ]
    ) | null |
12,954 | import numpy as np
def get_openpose_skeleton():
    """Bone connectivity (pairs of joint indices) for the OpenPose BODY_25 layout."""
    return np.array(
        [
            [0 , 1],
            [1 , 2],
            [2 , 3],
            [3 , 4],
            [1 , 5],
            [5 , 6],
            [6 , 7],
            [1 , 8],
            [8 , 9],
            [9 ,10],
            [10,11],
            [8 ,12],
            [12,13],
            [13,14],
            [0 ,15],
            [0 ,16],
            [15,17],
            [16,18],
            [21,19],
            [19,20],
            [14,21],
            [11,24],
            [24,22],
            [22,23],
        ]
    ) | null |
12,955 | import numpy as np
def get_posetrack_joint_names():
    """Joint names (17) for PoseTrack, mapped to this module's naming scheme."""
    return [
        "nose",
        "neck",
        "headtop",
        "lear",
        "rear",
        "lshoulder",
        "rshoulder",
        "lelbow",
        "relbow",
        "lwrist",
        "rwrist",
        "lhip",
        "rhip",
        "lknee",
        "rknee",
        "lankle",
        "rankle"
    ] | null |
12,956 | import numpy as np
def get_posetrack_original_kp_names():
    """Original PoseTrack keypoint names, in dataset order."""
    return [
        'nose',
        'head_bottom',
        'head_top',
        'left_ear',
        'right_ear',
        'left_shoulder',
        'right_shoulder',
        'left_elbow',
        'right_elbow',
        'left_wrist',
        'right_wrist',
        'left_hip',
        'right_hip',
        'left_knee',
        'right_knee',
        'left_ankle',
        'right_ankle'
    ] | null |
12,957 | import numpy as np
def get_pennaction_joint_names():
    """Joint names (13) for the Penn Action dataset, in dataset order."""
    return [
        "headtop",   # 0
        "lshoulder", # 1
        "rshoulder", # 2
        "lelbow",    # 3
        "relbow",    # 4
        "lwrist",    # 5
        "rwrist",    # 6
        "lhip" ,     # 7
        "rhip" ,     # 8
        "lknee",     # 9
        "rknee" ,    # 10
        "lankle",    # 11
        "rankle"     # 12
    ] | null |
12,958 | import numpy as np
def get_common_joint_names():
    """Joint names (14) of the common evaluation skeleton."""
    return [
        "rankle",    # 0  "lankle",    # 0
        "rknee",     # 1  "lknee",     # 1
        "rhip",      # 2  "lhip",      # 2
        "lhip",      # 3  "rhip",      # 3
        "lknee",     # 4  "rknee",     # 4
        "lankle",    # 5  "rankle",    # 5
        "rwrist",    # 6  "lwrist",    # 6
        "relbow",    # 7  "lelbow",    # 7
        "rshoulder", # 8  "lshoulder", # 8
        "lshoulder", # 9  "rshoulder", # 9
        "lelbow",    # 10  "relbow",    # 10
        "lwrist",    # 11  "rwrist",    # 11
        "neck",      # 12  "neck",      # 12
        "headtop",   # 13  "headtop",   # 13
    ] | null |
12,959 | import numpy as np
def get_common_paper_joint_names():
    """Human-readable names for the 14 common joints (for papers and plots)."""
    return [
        "Right Ankle", # 0  "lankle",    # 0
        "Right Knee", # 1  "lknee",     # 1
        "Right Hip", # 2  "lhip",      # 2
        "Left Hip", # 3  "rhip",      # 3
        "Left Knee", # 4  "rknee",     # 4
        "Left Ankle", # 5  "rankle",    # 5
        "Right Wrist", # 6  "lwrist",    # 6
        "Right Elbow", # 7  "lelbow",    # 7
        "Right Shoulder", # 8  "lshoulder", # 8
        "Left Shoulder", # 9  "rshoulder", # 9
        "Left Elbow", # 10  "relbow",    # 10
        "Left Wrist", # 11  "rwrist",    # 11
        "Neck", # 12  "neck",      # 12
        "Head", # 13  "headtop",   # 13
    ] | null |
12,960 | import numpy as np
def get_common_skeleton():
    """Bone connectivity (pairs of joint indices) for the 14-joint common skeleton."""
    return np.array(
        [
            [ 0, 1 ],
            [ 1, 2 ],
            [ 3, 4 ],
            [ 4, 5 ],
            [ 6, 7 ],
            [ 7, 8 ],
            [ 8, 2 ],
            [ 8, 9 ],
            [ 9, 3 ],
            [ 2, 3 ],
            [ 8, 12],
            [ 9, 10],
            [12, 9 ],
            [10, 11],
            [12, 13],
        ]
    ) | null |
12,961 | import numpy as np
def get_coco_joint_names():
    """Joint names (17) of the COCO keypoint layout, in dataset order."""
    return [
        "nose",      # 0
        "leye",      # 1
        "reye",      # 2
        "lear",      # 3
        "rear",      # 4
        "lshoulder", # 5
        "rshoulder", # 6
        "lelbow",    # 7
        "relbow",    # 8
        "lwrist",    # 9
        "rwrist",    # 10
        "lhip",      # 11
        "rhip",      # 12
        "lknee",     # 13
        "rknee",     # 14
        "lankle",    # 15
        "rankle",    # 16
    ] | null |
12,962 | import numpy as np
def get_ochuman_joint_names():
    """Joint names (19) for the OCHuman dataset, in dataset order."""
    return [
        'rshoulder',
        'relbow',
        'rwrist',
        'lshoulder',
        'lelbow',
        'lwrist',
        'rhip',
        'rknee',
        'rankle',
        'lhip',
        'lknee',
        'lankle',
        'headtop',
        'neck',
        'rear',
        'lear',
        'nose',
        'reye',
        'leye'
    ] | null |
12,963 | import numpy as np
def get_crowdpose_joint_names():
    """Joint names (14) for the CrowdPose dataset, in dataset order."""
    return [
        'lshoulder',
        'rshoulder',
        'lelbow',
        'relbow',
        'lwrist',
        'rwrist',
        'lhip',
        'rhip',
        'lknee',
        'rknee',
        'lankle',
        'rankle',
        'headtop',
        'neck'
    ] | null |
12,964 | import numpy as np
def get_coco_skeleton():
    """Bone connectivity (pairs of joint indices) for the 17-joint COCO layout."""
    # 0  - nose,
    # 1  - leye,
    # 2  - reye,
    # 3  - lear,
    # 4  - rear,
    # 5  - lshoulder,
    # 6  - rshoulder,
    # 7  - lelbow,
    # 8  - relbow,
    # 9  - lwrist,
    # 10 - rwrist,
    # 11 - lhip,
    # 12 - rhip,
    # 13 - lknee,
    # 14 - rknee,
    # 15 - lankle,
    # 16 - rankle,
    return np.array(
        [
            [15, 13],
            [13, 11],
            [16, 14],
            [14, 12],
            [11, 12],
            [ 5, 11],
            [ 6, 12],
            [ 5, 6 ],
            [ 5, 7 ],
            [ 6, 8 ],
            [ 7, 9 ],
            [ 8, 10],
            [ 1, 2 ],
            [ 0, 1 ],
            [ 0, 2 ],
            [ 1, 3 ],
            [ 2, 4 ],
            [ 3, 5 ],
            [ 4, 6 ]
        ]
    ) | null |
12,965 | import numpy as np
def get_mpii_joint_names():
    """Joint names (16) of the MPII 2D pose layout, in dataset order."""
    return [
        "rankle",    # 0
        "rknee",     # 1
        "rhip",      # 2
        "lhip",      # 3
        "lknee",     # 4
        "lankle",    # 5
        "hip",       # 6
        "thorax",    # 7
        "neck",      # 8
        "headtop",   # 9
        "rwrist",    # 10
        "relbow",    # 11
        "rshoulder", # 12
        "lshoulder", # 13
        "lelbow",    # 14
        "lwrist",    # 15
    ] | null |
12,966 | import numpy as np
def get_mpii_skeleton():
    """Bone connectivity (pairs of joint indices) for the 16-joint MPII layout."""
    # 0  - rankle,
    # 1  - rknee,
    # 2  - rhip,
    # 3  - lhip,
    # 4  - lknee,
    # 5  - lankle,
    # 6  - hip,
    # 7  - thorax,
    # 8  - neck,
    # 9  - headtop,
    # 10 - rwrist,
    # 11 - relbow,
    # 12 - rshoulder,
    # 13 - lshoulder,
    # 14 - lelbow,
    # 15 - lwrist,
    return np.array(
        [
            [ 0, 1 ],
            [ 1, 2 ],
            [ 2, 6 ],
            [ 6, 3 ],
            [ 3, 4 ],
            [ 4, 5 ],
            [ 6, 7 ],
            [ 7, 8 ],
            [ 8, 9 ],
            [ 7, 12],
            [12, 11],
            [11, 10],
            [ 7, 13],
            [13, 14],
            [14, 15]
        ]
    ) | null |
12,967 | import numpy as np
def get_aich_joint_names():
    """Joint names (14) for the AI Challenger dataset, in dataset order."""
    return [
        "rshoulder", # 0
        "relbow",    # 1
        "rwrist",    # 2
        "lshoulder", # 3
        "lelbow",    # 4
        "lwrist",    # 5
        "rhip",      # 6
        "rknee",     # 7
        "rankle",    # 8
        "lhip",      # 9
        "lknee",     # 10
        "lankle",    # 11
        "headtop",   # 12
        "neck",      # 13
    ] | null |
12,968 | import numpy as np
def get_aich_skeleton():
    """Bone connectivity (pairs of joint indices) for the 14-joint AI Challenger layout."""
    # 0  - rshoulder,
    # 1  - relbow,
    # 2  - rwrist,
    # 3  - lshoulder,
    # 4  - lelbow,
    # 5  - lwrist,
    # 6  - rhip,
    # 7  - rknee,
    # 8  - rankle,
    # 9  - lhip,
    # 10 - lknee,
    # 11 - lankle,
    # 12 - headtop,
    # 13 - neck,
    return np.array(
        [
            [ 0, 1 ],
            [ 1, 2 ],
            [ 3, 4 ],
            [ 4, 5 ],
            [ 6, 7 ],
            [ 7, 8 ],
            [ 9, 10],
            [10, 11],
            [12, 13],
            [13, 0 ],
            [13, 3 ],
            [ 0, 6 ],
            [ 3, 9 ]
        ]
    ) | null |
12,969 | import numpy as np
def get_3dpw_joint_names():
    """Joint names (14) for the 3DPW evaluation skeleton, in dataset order."""
    return [
        "nose",      # 0
        "thorax",    # 1
        "rshoulder", # 2
        "relbow",    # 3
        "rwrist",    # 4
        "lshoulder", # 5
        "lelbow",    # 6
        "lwrist",    # 7
        "rhip",      # 8
        "rknee",     # 9
        "rankle",    # 10
        "lhip",      # 11
        "lknee",     # 12
        "lankle",    # 13
    ] | null |
12,970 | import numpy as np
def get_3dpw_skeleton():
    """Bone connectivity (pairs of joint indices) for the 14-joint 3DPW layout."""
    return np.array(
        [
            [ 0, 1 ],
            [ 1, 2 ],
            [ 2, 3 ],
            [ 3, 4 ],
            [ 1, 5 ],
            [ 5, 6 ],
            [ 6, 7 ],
            [ 2, 8 ],
            [ 5, 11],
            [ 8, 11],
            [ 8, 9 ],
            [ 9, 10],
            [11, 12],
            [12, 13]
        ]
    ) | null |
12,971 | import numpy as np
def get_smplcoco_joint_names():
    """Joint names (19): the 14 common joints plus 5 COCO face joints."""
    return [
        "rankle",    # 0
        "rknee",     # 1
        "rhip",      # 2
        "lhip",      # 3
        "lknee",     # 4
        "lankle",    # 5
        "rwrist",    # 6
        "relbow",    # 7
        "rshoulder", # 8
        "lshoulder", # 9
        "lelbow",    # 10
        "lwrist",    # 11
        "neck",      # 12
        "headtop",   # 13
        "nose",      # 14
        "leye",      # 15
        "reye",      # 16
        "lear",      # 17
        "rear",      # 18
    ] | null |
12,972 | import numpy as np
def get_smplcoco_skeleton():
    """Bone connectivity (pairs of joint indices) for the 19-joint SMPL-COCO layout."""
    return np.array(
        [
            [ 0, 1 ],
            [ 1, 2 ],
            [ 3, 4 ],
            [ 4, 5 ],
            [ 6, 7 ],
            [ 7, 8 ],
            [ 8, 12],
            [12, 9 ],
            [ 9, 10],
            [10, 11],
            [12, 13],
            [14, 15],
            [15, 17],
            [16, 18],
            [14, 16],
            [ 8, 2 ],
            [ 9, 3 ],
            [ 2, 3 ],
        ]
    ) | null |
12,973 | import numpy as np
def get_smpl_joint_names():
    """Names of the 24 SMPL kinematic-tree joints, in SMPL order."""
    return [
        'hips',            # 0
        'leftUpLeg',       # 1
        'rightUpLeg',      # 2
        'spine',           # 3
        'leftLeg',         # 4
        'rightLeg',        # 5
        'spine1',          # 6
        'leftFoot',        # 7
        'rightFoot',       # 8
        'spine2',          # 9
        'leftToeBase',     # 10
        'rightToeBase',    # 11
        'neck',            # 12
        'leftShoulder',    # 13
        'rightShoulder',   # 14
        'head',            # 15
        'leftArm',         # 16
        'rightArm',        # 17
        'leftForeArm',     # 18
        'rightForeArm',    # 19
        'leftHand',        # 20
        'rightHand',       # 21
        'leftHandIndex1',  # 22
        'rightHandIndex1', # 23
    ] | null |
12,974 | import numpy as np
def get_smpl_paper_joint_names():
    """Human-readable names for the 24 SMPL joints (for papers and plots)."""
    return [
        'Hips',            # 0
        'Left Hip',        # 1
        'Right Hip',       # 2
        'Spine',           # 3
        'Left Knee',       # 4
        'Right Knee',      # 5
        'Spine_1',         # 6
        'Left Ankle',      # 7
        'Right Ankle',     # 8
        'Spine_2',         # 9
        'Left Toe',        # 10
        'Right Toe',       # 11
        'Neck',            # 12
        'Left Shoulder',   # 13
        'Right Shoulder',  # 14
        'Head',            # 15
        'Left Arm',        # 16
        'Right Arm',       # 17
        'Left Elbow',      # 18
        'Right Elbow',     # 19
        'Left Hand',       # 20
        'Right Hand',      # 21
        'Left Thumb',      # 22
        'Right Thumb',     # 23
    ] | null |
12,975 | import numpy as np
def get_smpl_neighbor_triplets():
    """For each SMPL joint: a [joint, neighbor1, neighbor2] index triplet."""
    return [
        [ 0, 1, 2 ],  # 0
        [ 1, 4, 0 ],  # 1
        [ 2, 0, 5 ],  # 2
        [ 3, 0, 6 ],  # 3
        [ 4, 7, 1 ],  # 4
        [ 5, 2, 8 ],  # 5
        [ 6, 3, 9 ],  # 6
        [ 7, 10, 4 ],  # 7
        [ 8, 5, 11],  # 8
        [ 9, 13, 14],  # 9
        [10, 7, 4 ],  # 10
        [11, 8, 5 ],  # 11
        [12, 9, 15],  # 12
        [13, 16, 9 ],  # 13
        [14, 9, 17],  # 14
        [15, 9, 12],  # 15
        [16, 18, 13],  # 16
        [17, 14, 19],  # 17
        [18, 20, 16],  # 18
        [19, 17, 21],  # 19
        [20, 22, 18],  # 20
        [21, 19, 23],  # 21
        [22, 20, 18],  # 22
        [23, 19, 21],  # 23
    ] | null |
12,976 | import numpy as np
def get_smpl_skeleton():
    """Parent-child bone pairs of the 24-joint SMPL kinematic tree."""
    return np.array(
        [
            [ 0, 1 ],
            [ 0, 2 ],
            [ 0, 3 ],
            [ 1, 4 ],
            [ 2, 5 ],
            [ 3, 6 ],
            [ 4, 7 ],
            [ 5, 8 ],
            [ 6, 9 ],
            [ 7, 10],
            [ 8, 11],
            [ 9, 12],
            [ 9, 13],
            [ 9, 14],
            [12, 15],
            [13, 16],
            [14, 17],
            [16, 18],
            [17, 19],
            [18, 20],
            [19, 21],
            [20, 22],
            [21, 23],
        ]
    ) | null |
12,977 | import numpy as np
def map_spin_joints_to_smpl():
    """Mapping [(source SPIN joint ids,), target SMPL joint id] per SMPL joint."""
    # this function primarily will be used to copy 2D keypoint
    # confidences to pose parameters
    return [
        [(39, 27, 28), 0],  # hip,lhip,rhip->hips
        [(28,), 1],  # lhip->leftUpLeg
        [(27,), 2],  # rhip->rightUpLeg
        [(41, 27, 28, 39), 3],  # Spine->spine
        [(29,), 4],  # lknee->leftLeg
        [(26,), 5],  # rknee->rightLeg
        [(41, 40, 33, 34,), 6],  # spine, thorax ->spine1
        [(30,), 7],  # lankle->leftFoot
        [(25,), 8],  # rankle->rightFoot
        [(40, 33, 34), 9],  # thorax,shoulders->spine2
        [(30,), 10],  # lankle -> leftToe
        [(25,), 11],  # rankle -> rightToe
        [(37, 42, 33, 34), 12],  # neck, shoulders -> neck
        [(34,), 13],  # lshoulder->leftShoulder
        [(33,), 14],  # rshoulder->rightShoulder
        [(33, 34, 38, 43, 44, 45, 46, 47, 48,), 15],  # nose, eyes, ears, headtop, shoulders->head
        [(34,), 16],  # lshoulder->leftArm
        [(33,), 17],  # rshoulder->rightArm
        [(35,), 18],  # lelbow->leftForeArm
        [(32,), 19],  # relbow->rightForeArm
        [(36,), 20],  # lwrist->leftHand
        [(31,), 21],  # rwrist->rightHand
        [(36,), 22],  # lhand -> leftHandIndex
        [(31,), 23],  # rhand -> rightHandIndex
    ] | null |
12,978 | import numpy as np
def map_smpl_to_common():
    """Mapping [(source SMPL joint ids,), target common-14 joint id]."""
    return [
        [(11, 8), 0],  # rightToe, rightFoot -> rankle
        [(5,), 1],  # rightleg -> rknee,
        [(2,), 2],  # rhip
        [(1,), 3],  # lhip
        [(4,), 4],  # leftLeg -> lknee
        [(10, 7), 5],  # lefttoe, leftfoot -> lankle
        [(21, 23), 6],  # rwrist
        [(18,), 7],  # relbow
        [(17, 14), 8],  # rshoulder
        [(16, 13), 9],  # lshoulder
        [(19,), 10],  # lelbow
        [(20, 22), 11],  # lwrist
        [(0, 3, 6, 9, 12), 12],  # neck
        [(15,), 13],  # headtop
    ] | null |
12,979 | import numpy as np
def relation_among_spin_joints():
    """Mapping [(related SPIN joint ids,), SPIN joint id] for joints 25..48."""
    # this function primarily will be used to copy 2D keypoint
    # confidences to 3D joints
    return [
        [(), 25],
        [(), 26],
        [(39,), 27],
        [(39,), 28],
        [(), 29],
        [(), 30],
        [(), 31],
        [(), 32],
        [(), 33],
        [(), 34],
        [(), 35],
        [(), 36],
        [(40,42,44,43,38,33,34,), 37],
        [(43,44,45,46,47,48,33,34,), 38],
        [(27,28,), 39],
        [(27,28,37,41,42,), 40],
        [(27,28,39,40,), 41],
        [(37,38,44,45,46,47,48,), 42],
        [(44,45,46,47,48,38,42,37,33,34,), 43],
        [(44,45,46,47,48,38,42,37,33,34), 44],
        [(44,45,46,47,48,38,42,37,33,34), 45],
        [(44,45,46,47,48,38,42,37,33,34), 46],
        [(44,45,46,47,48,38,42,37,33,34), 47],
        [(44,45,46,47,48,38,42,37,33,34), 48],
    ] | null |
12,980 | from typing import Any
import numpy as np
from easymocap.mytools.debug_utils import mywarn, log
def solve_translation(X, x, K):
    """Solve for camera translation from 3D-2D correspondences (weighted least squares).

    Args:
        X: (N, 3) 3D points (camera orientation already applied).
        x: (N, 3) 2D observations as (u, v, confidence).
        K: (3, 3) camera intrinsics.
    Returns:
        (3,) translation vector.
    """
    A = np.zeros((2*X.shape[0], 3))
    b = np.zeros((2*X.shape[0], 1))
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    # Two linearized projection equations per point, in the unknown (tx, ty, tz).
    for nj in range(X.shape[0]):
        A[2*nj, 0] = 1
        A[2*nj + 1, 1] = 1
        A[2*nj, 2] = -(x[nj, 0] - cx)/fx
        A[2*nj+1, 2] = -(x[nj, 1] - cy)/fy
        b[2*nj, 0] = X[nj, 2]*(x[nj, 0] - cx)/fx - X[nj, 0]
        b[2*nj+1, 0] = X[nj, 2]*(x[nj, 1] - cy)/fy - X[nj, 1]
        # weight each point's pair of equations by its detection confidence
        A[2*nj:2*nj+2, :] *= x[nj, 2]
        b[2*nj:2*nj+2, :] *= x[nj, 2]
    # NOTE(review): normal equations via explicit inverse; np.linalg.solve or
    # lstsq would be numerically safer for near-degenerate configurations.
    trans = np.linalg.inv(A.T @ A) @ A.T @ b
    return trans.T[0] | null |
12,981 | import numpy as np
import itertools
from easymocap.mytools.triangulator import batch_triangulate, project_points
from easymocap.mytools.debug_utils import log, mywarn, myerror
def project_and_distance(kpts3d, RT, kpts2d):
    """Project 3D keypoints into every view and measure reprojection error.

    Returns (depth, dist, conf): per-view projected depths, per-view/per-joint
    pixel distances (zeroed where either side is invisible), and the joint
    visibility mask.
    """
    proj = project_points(kpts3d, RT)
    # valid only where both the 3D point and the 2D detection are confident
    visible = (kpts3d[None, :, -1] > 0) * (kpts2d[:, :, -1] > 0)
    err = np.linalg.norm(proj[..., :2] - kpts2d[..., :2], axis=-1) * visible
    return proj[..., -1], err, visible
def remove_outview(kpts2d, out_view, debug):
    """Zero out the 2D detections of the first outlier view.

    Mutates kpts2d in place. Returns True when a view was removed, False
    when out_view is empty.
    """
    if not out_view:
        return False
    # only the first outlier view is removed per call
    bad = out_view[0]
    if debug:
        if len(out_view) == 1:
            log('[triangulate] remove outview: {}'.format(bad))
        else:
            mywarn('[triangulate] remove first outview: {} from {}'.format(bad, out_view))
    kpts2d[bad] = 0.
    return True
def remove_outjoint(kpts2d, Pall, out_joint, dist_max, dist_track, min_view=3, previous=None, debug=False):
    """Re-estimate outlier joints from view subsets and disable bad detections.

    For each joint in `out_joint`, triangulates every `min_view`-sized view
    combination, keeps the most confident combination whose reprojection
    error (and, when `previous` is given, 3D displacement in mm) is
    acceptable, then zeroes the 2D observations inconsistent with that
    estimate. Mutates kpts2d in place; returns True iff out_joint was
    non-empty.
    """
    # joints with a lower previous-frame 3D confidence skip the tracking check
    MIN_CONF_3D = 0.1
    if len(out_joint) == 0:
        return False
    if debug:
        mywarn('[triangulate] remove outjoint: {}'.format(out_joint))
    nviews = np.arange(kpts2d.shape[0])
    for nj in out_joint:
        valid = np.where(kpts2d[:, nj, -1] > 0)[0]
        if len(valid) < min_view:
            # if less than 3 visible view, set these unvisible
            kpts2d[:, nj, -1] = 0
            continue
        kpts_nj = kpts2d[valid, nj]
        Pall_nj = Pall[valid]
        view_index = nviews[valid]
        view_local = np.arange(valid.shape[0])
        # triangulate every combination of `min_view` visible views
        comb_views = np.array(list(itertools.combinations(view_local.tolist(), min_view))).T
        comb_kpts = kpts_nj[comb_views]
        comb_Pall = Pall_nj[comb_views]
        comb_k3d = batch_triangulate(comb_kpts, comb_Pall)
        depth, dist, conf = project_and_distance(comb_k3d, comb_Pall, comb_kpts)
        # rank the candidate combinations by descending total 2D confidence
        sort_by_conf = (-comb_kpts[..., -1].sum(axis=0)).argsort()
        flag = (dist[:, sort_by_conf]<dist_max).all(axis=0)
        if previous is not None:
            # also require temporal consistency with the previous frame (mm)
            dist3d = np.linalg.norm(previous[[nj], :3] - comb_k3d[:, :3], axis=-1) * 1000
            flag = flag & ((dist3d[sort_by_conf] < dist_track) | (previous[nj, 3] < MIN_CONF_3D))
        valid = sort_by_conf[flag]
        if valid.shape[0] == 0:
            if debug:
                mywarn('[triangulate] cannot find valid combinations of joint {}'.format(nj))
            kpts2d[:, nj, -1] = 0
        else:
            # check all 2D keypoints
            k3d = comb_k3d[valid[0]].reshape(1, 4)
            depth, dist, conf = project_and_distance(k3d, Pall_nj, kpts_nj[:, None])
            valid_view = view_index[np.where(dist < dist_max)[0]]
            # TODO(original, translated): try re-triangulating here; if the new
            # estimate has a larger error than the old one, keep the old one.
            if debug:
                log('[triangulate] {} find valid combinations of joint: {}'.format(nj, valid_view))
                log('[triangulate] {} distance 2d pixel (max {}): {}'.format(nj, dist_max, dist[np.where(dist < dist_max)[0], 0]))
                if previous is not None and previous[nj, 3] > MIN_CONF_3D:
                    _dist3d = np.linalg.norm(previous[[nj], :3] - k3d[:, :3], axis=-1) * 1000
                    log('[triangulate] {} distance 3d mm (max {}): {}'.format(nj, dist_track, _dist3d))
                    if _dist3d > dist_track:
                        # NOTE(review): debugger breakpoint left in library code
                        import ipdb; ipdb.set_trace()
            set0 = np.zeros(kpts2d.shape[0])
            set0[valid_view] = 1.
            kpts2d[:, nj, -1] *= set0
    return True
def batch_triangulate(keypoints_, Pall, min_view=2):
    """ triangulate the keypoints of whole body
    Args:
        keypoints_ (nViews, nJoints, 3): 2D detections
        Pall (nViews, 3, 4) | (nViews, nJoints, 3, 4): projection matrix of each view
        min_view (int, optional): min view for visible points. Defaults to 2.
    Returns:
        keypoints3d: (nJoints, 4)
    """
    # keypoints: (nViews, nJoints, 3)
    # Pall: (nViews, 3, 4)
    # A: (nJoints, nViewsx2, 4), x: (nJoints, 4, 1); b: (nJoints, nViewsx2, 1)
    # keep only joints visible in at least `min_view` views
    v = (keypoints_[:, :, -1]>0).sum(axis=0)
    valid_joint = np.where(v >= min_view)[0]
    keypoints = keypoints_[:, valid_joint]
    # 3D confidence = mean 2D confidence over the visible views
    conf3d = keypoints[:, :, -1].sum(axis=0)/v[valid_joint]
    # P0/P1/P2: first/second/last rows of each projection matrix: (1, nViews, 1, 4)
    if len(Pall.shape) == 3:
        P0 = Pall[None, :, 0, :]
        P1 = Pall[None, :, 1, :]
        P2 = Pall[None, :, 2, :]
    else:
        # per-joint projection matrices: (nViews, nJoints, 3, 4)
        P0 = Pall[:, :, 0, :].swapaxes(0, 1)
        P1 = Pall[:, :, 1, :].swapaxes(0, 1)
        P2 = Pall[:, :, 2, :].swapaxes(0, 1)
    # uP2: x coordinate times P2: (nJoints, nViews, 1, 4)
    uP2 = keypoints[:, :, 0].T[:, :, None] * P2
    vP2 = keypoints[:, :, 1].T[:, :, None] * P2
    conf = keypoints[:, :, 2].T[:, :, None]
    # confidence-weighted DLT rows; solved via SVD (right singular vector of
    # the smallest singular value)
    Au = conf * (uP2 - P0)
    Av = conf * (vP2 - P1)
    A = np.hstack([Au, Av])
    u, s, v = np.linalg.svd(A)
    X = v[:, -1, :]
    # dehomogenize
    X = X / X[:, 3:]
    # out: (nJoints, 4)
    result = np.zeros((keypoints_.shape[1], 4))
    result[valid_joint, :3] = X[:, :3]
    result[valid_joint, 3] = conf3d #* (conf[..., 0].sum(axis=-1)>min_view)
    return result
def log(text):
    # Thin wrapper: route informational messages through myprint.
    myprint(text, 'info')
def mywarn(text):
    # Thin wrapper: route warnings through myprint.
    myprint(text, 'warn')
def iterative_triangulate(kpts2d, RT,
    min_conf=0.1, min_view=3, min_joints=3, dist_max=0.05, dist_track=50,
    thres_outlier_view=0.4, thres_outlier_joint=0.4, debug=True,
    previous=None,
    **kwargs):
    """Robust multi-view triangulation with iterative outlier rejection.

    Repeatedly triangulates, measures reprojection error, and removes
    whole outlier views, outlier joints (via remove_outjoint), or single
    bad detections until no outlier remains. Returns (kpts3d, kpts2d):
    the (nJoints, 4) triangulated joints and the cleaned 2D detections;
    both are zeroed out when fewer than `min_joints` joints survive.
    """
    kpts2d = kpts2d.copy()
    conf = kpts2d[..., -1]
    # drop low-confidence detections up front
    kpts2d[conf<min_conf] = 0.
    if debug:
        log('[triangulate] kpts2d: {}'.format(kpts2d.shape))
    while True:
        # 0. triangulate and project
        kpts3d = batch_triangulate(kpts2d, RT, min_view=min_view)
        depth, dist, conf = project_and_distance(kpts3d, RT, kpts2d)
        # 2. find the outlier
        vv, jj = np.where(dist > dist_max)
        if vv.shape[0] < 1:
            if debug:
                log('[triangulate] Not found outlier, break')
            break
        ratio_outlier_view = (dist>dist_max).sum(axis=1)/(1e-5 + (conf > 0.).sum(axis=1))
        ratio_outlier_joint = (dist>dist_max).sum(axis=0)/(1e-5 + (conf > 0.).sum(axis=0))
        # 3. find the totally wrong detections
        out_view = np.where(ratio_outlier_view > thres_outlier_view)[0]
        error_joint = dist.sum(axis=0)/(1e-5 + (conf > 0.).sum(axis=0))
        # for joint, we calculate the mean distance of this joint
        out_joint = np.where((ratio_outlier_joint > thres_outlier_joint) & (error_joint > dist_max))[0]
        if len(out_view) > 1:
            # TODO(original, translated): if all ratios are comparable this
            # discards views almost at random; the view confidences should be
            # increased instead of dropping data outright. Several proposals
            # should be generated and searched recursively, and when
            # `previous` is available it should drive the decision.
            # cfg = dict(min_conf=min_conf, min_view=min_view, min_joints=min_joints, dist_max=dist_max, dist_track=dist_track,
            #     thres_outlier_view=thres_outlier_view, thres_outlier_joint=0.4, debug=True, previous=None)
            if debug: mywarn('[triangulate] More than one outlier view: {}, stop triangulation.'.format(ratio_outlier_view))
            return kpts3d, np.zeros_like(kpts2d)
        if debug: mywarn('[triangulate] Remove outlier view give outlier ratio: {}'.format(ratio_outlier_view))
        dist_view = dist.sum(axis=1)/(1e-5 + (conf > 0.).sum(axis=1))
        out_view = out_view.tolist()
        # remove the view with the largest mean error first
        out_view.sort(key=lambda x:-dist_view[x])
        if remove_outview(kpts2d, out_view, debug): continue
        if len(out_joint) > 0:
            if debug:
                print(dist[:, out_joint])
                mywarn('[triangulate] Remove outlier joint {} given outlier ratio: {}'.format(out_joint, ratio_outlier_joint[out_joint]))
            remove_outjoint(kpts2d, RT, out_joint, dist_max, dist_track, previous=previous, debug=debug)
            continue
        if debug:
            log('[triangulate] Directly remove {}, {}'.format(vv, jj))
        kpts2d[vv, jj, -1] = 0.
    if debug:
        log('[triangulate] finally {} valid points, {} not valid'.format((kpts3d[..., -1]>0).sum(), np.where(kpts3d[..., -1]<=0)[0]))
    if (kpts3d[..., -1]>0).sum() < min_joints:
        kpts3d[..., -1] = 0.
        kpts2d[..., -1] = 0.
        return kpts3d, kpts2d
    return kpts3d, kpts2d | null |
12,982 | import numpy as np
from itertools import combinations
from easymocap.mytools.camera_utils import Undistort
from easymocap.mytools.triangulator import iterative_triangulate
The provided code snippet includes necessary dependencies for implementing the `batch_triangulate` function. Write a Python function `def batch_triangulate(keypoints_, Pall, min_view=2)` to solve the following problem:
triangulate the keypoints of whole body Args: keypoints_ (nViews, nJoints, 3): 2D detections Pall (nViews, 3, 4): projection matrix of each view min_view (int, optional): min view for visible points. Defaults to 2. Returns: keypoints3d: (nJoints, 4)
Here is the function:
def batch_triangulate(keypoints_, Pall, min_view=2):
    """Linearly triangulate whole-body keypoints from multiple views.

    Args:
        keypoints_ (nViews, nJoints, 3): per-view 2D detections (u, v, conf).
        Pall (nViews, 3, 4): projection matrix of each view.
        min_view (int, optional): minimum number of views in which a joint
            must be visible (conf > 0) to be triangulated. Defaults to 2.
    Returns:
        (nJoints, 4) array: xyz plus averaged confidence; joints seen in
        fewer than ``min_view`` views stay all-zero.
    """
    # count how many views actually observed each joint
    visible_count = (keypoints_[:, :, -1] > 0).sum(axis=0)
    valid = np.where(visible_count >= min_view)[0]
    kpts = keypoints_[:, valid]
    # confidence of the 3D point = mean confidence over observing views
    conf3d = kpts[:, :, -1].sum(axis=0) / visible_count[valid]
    # rows of each projection matrix, broadcast over joints: (1, nViews, 4)
    P0 = Pall[None, :, 0, :]
    P1 = Pall[None, :, 1, :]
    P2 = Pall[None, :, 2, :]
    # (nJoints, nViews, 1) pixel coordinates and confidences
    u = kpts[:, :, 0].T[:, :, None]
    v2d = kpts[:, :, 1].T[:, :, None]
    w = kpts[:, :, 2].T[:, :, None]
    # confidence-weighted DLT system: each view contributes two equations
    A = np.hstack([w * (u * P2 - P0), w * (v2d * P2 - P1)])
    # homogeneous least-squares solution = right singular vector of the
    # smallest singular value, then dehomogenize
    _, _, vh = np.linalg.svd(A)
    X = vh[:, -1, :]
    X = X / X[:, 3:]
    result = np.zeros((keypoints_.shape[1], 4))
    result[valid, :3] = X[:, :3]
    result[valid, 3] = conf3d
    return result
12,983 | import numpy as np
from itertools import combinations
from easymocap.mytools.camera_utils import Undistort
from easymocap.mytools.triangulator import iterative_triangulate
def project_wo_dist(keypoints, RT, einsum='vab,kb->vka'):
    """Project 3D keypoints with [R|T] matrices, ignoring lens distortion.

    Args:
        keypoints (..., >=3): 3D points; only the first three channels are used.
        RT: stacked (3, 4) camera matrices combined via the ``einsum`` pattern.
        einsum: contraction pattern between RT and homogeneous points.
    Returns:
        (projected, depth): projected[..., :2] are pixel coordinates,
        projected[..., 2] keeps the camera-space depth, depth is that same
        channel before normalization.
    """
    ones = np.ones_like(keypoints[..., :1])
    homogeneous = np.concatenate([keypoints[..., :3], ones], axis=-1)
    projected = np.einsum(einsum, RT, homogeneous)
    # grab depth before the in-place perspective divide (channel 2 itself is untouched)
    depth = projected[..., 2]
    projected[..., :2] /= projected[..., 2:]
    return projected, depth
12,984 | from typing import Any
import numpy as np
import cv2
def views_from_dimGroups(dimGroups):
    """Map each flat detection index to the view it belongs to.

    Args:
        dimGroups: cumulative per-view offsets, e.g. [0, 2, 5] means view 0
            owns flat indices [0, 2) and view 1 owns [2, 5).
    Returns:
        (dimGroups[-1],) int array holding the view id of every flat index.
    """
    # `np.int` was removed in NumPy >= 1.24; the builtin `int` is the
    # documented replacement and yields the same platform-default dtype.
    views = np.zeros(dimGroups[-1], dtype=int)
    for nv in range(len(dimGroups) - 1):
        views[dimGroups[nv]:dimGroups[nv + 1]] = nv
    return views
12,985 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
def dict_of_numpy_to_tensor(body_params, device):
    """Recursively convert a (possibly nested) dict of numpy arrays into
    torch Tensors on ``device``; nested dicts are converted in place of
    their values."""
    converted = {}
    for name, value in body_params.items():
        converted[name] = (dict_of_numpy_to_tensor(value, device)
                           if isinstance(value, dict)
                           else torch.Tensor(value).to(device))
    return converted
12,986 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
def dict_of_tensor_to_numpy(body_params):
    """Recursively convert a (possibly nested) dict of torch Tensors into
    numpy arrays (moved to CPU first)."""
    converted = {}
    for name, value in body_params.items():
        converted[name] = (dict_of_tensor_to_numpy(value)
                           if isinstance(value, dict)
                           else value.cpu().numpy())
    return converted
12,987 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
class LBFGS(Optimizer):
    """Implements L-BFGS algorithm, heavily inspired by `minFunc
    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`.
    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).
    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.
    .. note::
        This is a very memory intensive optimizer (it requires additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
        try reducing the history size, or use a different algorithm.
    Arguments:
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-5).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
    """
    def __init__(self,
                 params,
                 lr=1,
                 max_iter=20,
                 max_eval=None,
                 tolerance_grad=1e-5,
                 tolerance_change=1e-9,
                 history_size=100,
                 line_search_fn=None):
        if max_eval is None:
            max_eval = max_iter * 5 // 4
        defaults = dict(
            lr=lr,
            max_iter=max_iter,
            max_eval=max_eval,
            tolerance_grad=tolerance_grad,
            tolerance_change=tolerance_change,
            history_size=history_size,
            line_search_fn=line_search_fn)
        super(LBFGS, self).__init__(params, defaults)
        if len(self.param_groups) != 1:
            raise ValueError("LBFGS doesn't support per-parameter options "
                             "(parameter groups)")
        self._params = self.param_groups[0]['params']
        self._numel_cache = None

    def _numel(self):
        # Lazily compute and cache the total number of optimized scalars.
        if self._numel_cache is None:
            # local import: `reduce` is not imported at module level in this file
            from functools import reduce
            self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
        return self._numel_cache

    def _gather_flat_grad(self):
        # Concatenate all parameter gradients into one flat vector;
        # missing gradients are treated as zeros.
        views = []
        for p in self._params:
            if p.grad is None:
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        # In-place parameter update: p += step_size * update_slice.
        offset = 0
        for p in self._params:
            numel = p.numel()
            # view as to avoid deprecated pointwise semantics;
            # use the `alpha=` keyword form — the positional
            # (Number, Tensor) overload of add_ is deprecated.
            p.data.add_(update[offset:offset + numel].view_as(p.data), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def _clone_param(self):
        return [p.clone() for p in self._params]

    def _set_param(self, params_data):
        for p, pdata in zip(self._params, params_data):
            p.data.copy_(pdata)

    def _directional_evaluate(self, closure, x, t, d):
        # Evaluate loss/gradient at x + t*d, then restore the parameters.
        self._add_grad(t, d)
        loss = float(closure())
        flat_grad = self._gather_flat_grad()
        self._set_param(x)
        return loss, flat_grad

    def step(self, closure):
        """Performs a single optimization step.
        Arguments:
            closure (callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1
        group = self.param_groups[0]
        lr = group['lr']
        max_iter = group['max_iter']
        max_eval = group['max_eval']
        tolerance_grad = group['tolerance_grad']
        tolerance_change = group['tolerance_change']
        line_search_fn = group['line_search_fn']
        history_size = group['history_size']
        # NOTE: LBFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault('func_evals', 0)
        state.setdefault('n_iter', 0)
        # evaluate initial f(x) and df/dx
        orig_loss = closure()
        loss = float(orig_loss)
        current_evals = 1
        state['func_evals'] += 1
        flat_grad = self._gather_flat_grad()
        opt_cond = flat_grad.abs().max() <= tolerance_grad
        # optimal condition
        if opt_cond:
            return orig_loss
        # tensors cached in state (for tracing)
        d = state.get('d')
        t = state.get('t')
        old_dirs = state.get('old_dirs')
        old_stps = state.get('old_stps')
        ro = state.get('ro')
        H_diag = state.get('H_diag')
        prev_flat_grad = state.get('prev_flat_grad')
        prev_loss = state.get('prev_loss')
        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            # keep track of nb of iterations
            n_iter += 1
            state['n_iter'] += 1
            ############################################################
            # compute gradient descent direction
            ############################################################
            if state['n_iter'] == 1:
                # first iteration: plain steepest descent
                d = flat_grad.neg()
                old_dirs = []
                old_stps = []
                ro = []
                H_diag = 1
            else:
                # do lbfgs update (update memory)
                y = flat_grad.sub(prev_flat_grad)
                s = d.mul(t)
                ys = y.dot(s)  # y*s
                if ys > 1e-10:
                    # updating memory
                    if len(old_dirs) == history_size:
                        # shift history by one (limited-memory)
                        old_dirs.pop(0)
                        old_stps.pop(0)
                        ro.pop(0)
                    # store new direction/step
                    old_dirs.append(y)
                    old_stps.append(s)
                    ro.append(1. / ys)
                    # update scale of initial Hessian approximation
                    H_diag = ys / y.dot(y)  # (y*y)
                # compute the approximate (L-BFGS) inverse Hessian
                # multiplied by the gradient (two-loop recursion)
                num_old = len(old_dirs)
                if 'al' not in state:
                    state['al'] = [None] * history_size
                al = state['al']
                # iteration in L-BFGS loop collapsed to use just one buffer
                q = flat_grad.neg()
                for i in range(num_old - 1, -1, -1):
                    al[i] = old_stps[i].dot(q) * ro[i]
                    # keyword `alpha` form: positional overload is deprecated
                    q.add_(old_dirs[i], alpha=-al[i])
                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)
            if prev_flat_grad is None:
                prev_flat_grad = flat_grad.clone()
            else:
                prev_flat_grad.copy_(flat_grad)
            prev_loss = loss
            ############################################################
            # compute step length
            ############################################################
            # reset initial guess for step size
            if state['n_iter'] == 1:
                t = min(1., 1. / flat_grad.abs().sum()) * lr
            else:
                t = lr
            # directional derivative
            gtd = flat_grad.dot(d)  # g * d
            # directional derivative is below tolerance
            if gtd > -tolerance_change:
                break
            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    x_init = self._clone_param()
                    def obj_func(x, t, d):
                        return self._directional_evaluate(closure, x, t, d)
                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
                        obj_func, x_init, t, d, loss, flat_grad, gtd)
                self._add_grad(t, d)
                opt_cond = flat_grad.abs().max() <= tolerance_grad
            else:
                # no line search, simply move with fixed-step
                self._add_grad(t, d)
                if n_iter != max_iter:
                    # re-evaluate function only if not in last iteration
                    # the reason we do this: in a stochastic setting,
                    # no use to re-evaluate that function here
                    loss = float(closure())
                    flat_grad = self._gather_flat_grad()
                    opt_cond = flat_grad.abs().max() <= tolerance_grad
                    ls_func_evals = 1
            # update func eval
            current_evals += ls_func_evals
            state['func_evals'] += ls_func_evals
            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break
            if current_evals >= max_eval:
                break
            # optimal condition
            if opt_cond:
                break
            # lack of progress
            if d.mul(t).abs().max() <= tolerance_change:
                break
            if abs(loss - prev_loss) < tolerance_change:
                break
        state['d'] = d
        state['t'] = t
        state['old_dirs'] = old_dirs
        state['old_stps'] = old_stps
        state['ro'] = ro
        state['H_diag'] = H_diag
        state['prev_flat_grad'] = prev_flat_grad
        state['prev_loss'] = prev_loss
        return orig_loss
def make_optimizer(opt_params, optim_type='lbfgs', max_iter=20,
    lr=1e-3, betas=(0.9, 0.999), weight_decay=0.0, **kwargs):
    """Build an optimizer over ``opt_params``.

    ``optim_type`` selects between the project L-BFGS (with strong-Wolfe
    line search) and Adam; extra ``kwargs`` are forwarded to L-BFGS only.
    """
    if isinstance(opt_params, dict):
        # LBFGS cannot consume a dict of parameters; flatten to a list
        opt_params = list(opt_params.values())
    if optim_type == 'lbfgs':
        from easymocap.pyfitting.lbfgs import LBFGS
        return LBFGS(opt_params, line_search_fn='strong_wolfe', max_iter=max_iter,
            tolerance_grad= 0.0000001, # float32 carries ~7 significant digits
            tolerance_change=0.0000001,
            **kwargs)
    if optim_type == 'adam':
        return torch.optim.Adam(opt_params, lr=lr, betas=betas, weight_decay=weight_decay)
    raise NotImplementedError
12,988 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
def grad_require(params, flag=False):
    """Set ``requires_grad`` to ``flag`` on every tensor in a list or in the
    values of a dict; other container types are silently ignored."""
    if isinstance(params, list):
        tensors = params
    elif isinstance(params, dict):
        tensors = params.values()
    else:
        tensors = ()
    for tensor in tensors:
        tensor.requires_grad = flag
12,989 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
def make_closure(optimizer, model, params, infos, loss, device):
    """Instantiate the configured loss terms and return a closure suitable
    for ``optimizer.step``.

    Each entry of ``loss`` declares a module to load, the output/info keys it
    consumes, and a scalar ``weight``. The closure zeroes gradients, runs the
    model, accumulates the weighted loss sum and backpropagates it; with
    ``debug=True`` it instead returns the per-term dict and weights.
    """
    loss_modules = {}
    for name, cfg in loss.items():
        module = load_object(cfg['module'], cfg['args'])
        if isinstance(module, nn.Module):
            module.to(device)
        loss_modules[name] = module

    def closure(debug=False):
        optimizer.zero_grad()
        output = model(params.copy())
        loss_dict = {}
        loss_weight = {name: loss[name].weight for name in loss_modules.keys()}
        for name, module in loss_modules.items():
            out_sel = {k: output[k] for k in loss[name].key_from_output}
            info_sel = {k: infos[k] for k in loss[name].key_from_infos}
            value = module(out_sel, info_sel)
            if isinstance(value, dict):
                # a loss term may expand into several sub-terms, each
                # inheriting the parent's weight
                for sub_name, sub_value in value.items():
                    loss_dict[name + '_' + sub_name] = sub_value
                    loss_weight[name + '_' + sub_name] = loss_weight[name]
                loss_weight.pop(name)
            else:
                loss_dict[name] = value
        total = sum(loss_dict[key] * loss_weight[key]
                    for key in loss_dict.keys())
        if debug:
            return loss_dict, loss_weight
        total.backward()
        return total
    return closure
12,990 | import torch
import torch.nn as nn
from easymocap.config import Config, load_object
from easymocap.mytools.debug_utils import log
def rel_change(prev_val, curr_val):
    """Relative change between two successive values, with the denominator
    clamped to at least 1e-5 to avoid division by (near-)zero."""
    denom = max([1e-5, abs(prev_val), abs(curr_val)])
    return (prev_val - curr_val) / denom
12,991 | import numpy as np
import cv2
from easymocap.mytools.camera_utils import Undistort
from easymocap.mytools.debug_utils import mywarn
from .triangulate import batch_triangulate, project_wo_dist
from collections import defaultdict
def LOG_ARRAY(array2d, format='{:>8.2f} '):
    """Render a 2-D array as text, one row per line, each cell printed with
    ``format``; returns the empty string for an empty array."""
    lines = []
    for row in array2d:
        lines.append(''.join(format.format(cell) for cell in row))
    return ''.join(line + '\n' for line in lines)
12,992 | import numpy as np
import cv2
from easymocap.mytools.camera_utils import Undistort
from easymocap.mytools.debug_utils import log, mywarn, myerror
from .iterative_triangulate import iterative_triangulate
from easymocap.mytools.triangulator import project_points, batch_triangulate
from easymocap.mytools.timer import Timer
def skew_op(x):
    """Skew-symmetric 3x3 matrix [x]_x of a (3, 1) column vector, so that
    skew_op(a) @ b == cross(a, b)."""
    a, b, c = x[0, 0], x[1, 0], x[2, 0]
    res = np.zeros((3, 3), dtype=x.dtype)
    res[0, 1], res[0, 2] = -c, b
    res[1, 0], res[1, 2] = c, -a
    res[2, 0], res[2, 1] = -b, a
    return res

def fundamental_op(K0, K1, R_0, T_0, R_1, T_1):
    """Fundamental matrix between two calibrated views given intrinsics
    (K0, K1) and world-to-camera extrinsics (R, T) of each view."""
    R_rel = R_0 @ R_1.T
    baseline = K1 @ R_1 @ R_0.T @ (T_0 - R_rel @ T_1)
    return np.linalg.inv(K0).T @ R_rel @ K1.T @ skew_op(baseline)
12,993 | import os
import torch
import numpy as np
from easymocap.bodymodel.smpl import SMPLModel
from easymocap.mytools.debug_utils import log
def try_to_download_SMPL(model_dir):
    """Best-effort download of the PARE data bundle and extraction into
    ``model_dir`` (shells out to wget/unzip; no error checking)."""
    download_cmd = 'wget https://www.dropbox.com/s/aeulffqzb3zmh8x/pare-github-data.zip'
    os.system(download_cmd)
    os.makedirs(model_dir, exist_ok=True)
    unzip_cmd = 'unzip pare-github-data.zip -d {}'.format(model_dir)
    print('[RUN] {}'.format(unzip_cmd))
    os.system(unzip_cmd)
12,994 | import os
from typing import Any
import numpy as np
import cv2
from os.path import join
from easymocap.mytools.vis_base import plot_keypoints_auto, merge, plot_bbox, get_rgb, plot_cross
from easymocap.datasets.base import add_logo
from easymocap.mytools.camera_utils import Undistort
def projectPoints(k3d, camera):
    """Project 3D keypoints into one camera view.

    Args:
        k3d (N, >=4): xyz plus confidence in the last column.
        camera: dict with 'R', 'T', 'K', 'dist'.
    Returns:
        (k2d, depth): projected pixels with the original confidence appended,
        and the camera-space depth per point.
    """
    pts3d = np.ascontiguousarray(k3d[:, :3])
    cam_space = pts3d @ camera['R'].T + camera['T'].T
    depth = cam_space[:, -1:]
    projected, _ = cv2.projectPoints(pts3d, camera['R'], camera['T'], camera['K'], camera['dist'])
    k2d = np.hstack([projected[:, 0], k3d[:, -1:]])
    return k2d, depth
12,995 | from tqdm import tqdm
import cv2
import os
from easymocap.visualize.pyrender_wrapper import plot_meshes
from os.path import join
import numpy as np
from easymocap.datasets.base import add_logo
from easymocap.mytools.vis_base import merge, plot_bbox
from easymocap.mytools.camera_utils import Undistort
from .vis import VisBase
def projectPoints(X, K, R, t, Kd):
    """Project 3D points with the Panoptic distortion model.

    Args:
        X (3, N): 3D points (columns).
        K (3, 3): intrinsics.
        R (3, 3), t (3, 1): world-to-camera rotation and translation.
        Kd (5,): distortion coefficients [k1, k2, p1, p2, k3].
    Returns:
        (3, N) array: pixel coordinates in rows 0-1, camera depth in row 2.

    Bug fix vs. the original: the old code updated x[0,:] in place and then
    reused it when computing x[1,:], so the tangential term and the K[1,0]
    term of the y coordinate were fed already-transformed values. Distorted
    coordinates are now computed from untouched copies; results are identical
    when distortion is zero and K[1,0] == 0.
    """
    x = R @ X + t
    x[0:2, :] = x[0:2, :] / x[2, :]  # normalize to the image plane
    u = x[0, :].copy()
    v = x[1, :].copy()
    r2 = u * u + v * v
    radial = 1 + Kd[0] * r2 + Kd[1] * r2 * r2 + Kd[4] * r2 * r2 * r2
    # tangential coefficients: Kd[2] = p1, Kd[3] = p2
    xd = u * radial + 2 * Kd[2] * u * v + Kd[3] * (r2 + 2 * u * u)
    yd = v * radial + 2 * Kd[3] * u * v + Kd[2] * (r2 + 2 * v * v)
    x[0, :] = K[0, 0] * xd + K[0, 1] * yd + K[0, 2]
    x[1, :] = K[1, 0] * xd + K[1, 1] * yd + K[1, 2]
    return x
12,996 | from easymocap.mytools.camera_utils import read_cameras
from easymocap.mytools.debug_utils import log, myerror, mywarn
from easymocap.mytools.file_utils import read_json
from .basedata import ImageDataBase, read_mv_images, find_best_people, find_all_people
import os
from os.path import join
import numpy as np
import cv2
from collections import defaultdict
panoptic15_in_body15 = [1,0,8,5,6,7,12,13,14,2,3,4,9,10,11]
def convert_body15_panoptic15(keypoints):
    """Reorder body15 joints into the panoptic15 joint order (last two axes
    are joints x channels)."""
    return keypoints[..., panoptic15_in_body15, :]
12,997 | from easymocap.mytools.camera_utils import read_cameras
from easymocap.mytools.debug_utils import log, myerror, mywarn
from easymocap.mytools.file_utils import read_json
from .basedata import ImageDataBase, read_mv_images, find_best_people, find_all_people
import os
from os.path import join
import numpy as np
import cv2
from collections import defaultdict
panoptic15_in_body15 = [1,0,8,5,6,7,12,13,14,2,3,4,9,10,11]
def convert_panoptic15_body15(keypoints):
    """Inverse of convert_body15_panoptic15: scatter panoptic15-ordered
    joints back into body15 order."""
    restored = np.zeros_like(keypoints)
    restored[..., panoptic15_in_body15, :] = keypoints
    return restored
12,998 | from easymocap.mytools.camera_utils import read_cameras
from easymocap.mytools.debug_utils import log, myerror, mywarn
from easymocap.mytools.file_utils import read_json
from .basedata import ImageDataBase, read_mv_images, find_best_people, find_all_people
import os
from os.path import join
import numpy as np
import cv2
from collections import defaultdict
def padding_and_stack(datas):
    """Stack a list of {key: array} dicts along a new leading axis.

    Shapes are taken from the first dict holding each key; entries missing a
    key are left as zeros. Empty dicts contribute an all-zero slot.
    """
    shapes = {}
    for data in datas:
        if not data:
            continue
        for key, value in data.items():
            shapes.setdefault(key, value.shape)
    collect = {key: np.zeros((len(datas),) + tuple(shape))
               for key, shape in shapes.items()}
    for idx, data in enumerate(datas):
        for key, value in data.items():
            collect[key][idx] = value
    return collect
12,999 | from easymocap.mytools.camera_utils import read_cameras
from easymocap.mytools.debug_utils import log, myerror, mywarn
from easymocap.mytools.file_utils import read_json
from .basedata import ImageDataBase, read_mv_images, find_best_people, find_all_people
import os
from os.path import join
import numpy as np
import cv2
from collections import defaultdict
def padding_empty(datas):
    """Collect per-view arrays into {key: list}, substituting an empty
    (0, *shape) float32 array for views missing a key (shape taken from the
    first view that has it, minus the leading detection axis)."""
    shapes = {}
    for data in datas:
        if not data:
            continue
        for key, value in data.items():
            shapes.setdefault(key, value.shape[1:])
    collect = {key: [None] * len(datas) for key in shapes}
    for idx, data in enumerate(datas):
        for key, shape in shapes.items():
            if key in data:
                collect[key][idx] = data[key]
            else:
                print('[Dataset] padding empty view {} of {}'.format(idx, key))
                collect[key][idx] = np.zeros((0,) + tuple(shape), dtype=np.float32)
    return collect
13,000 | from easymocap.mytools.camera_utils import read_cameras
from easymocap.mytools.debug_utils import log, myerror, mywarn
from easymocap.mytools.file_utils import read_json
from .basedata import ImageDataBase, read_mv_images, find_best_people, find_all_people
import os
from os.path import join
import numpy as np
import cv2
from collections import defaultdict
def parse_frames(pafs_frame, H, W):
    # Parse one frame's worth of lines from a 4D-association paf dump into
    # 25 joint heatmap-peak arrays (pixel coords scaled by W/H) and 26
    # part-affinity score matrices keyed by (joint_a, joint_b).
    res = {
        'joints': [],
        'pafs': {}
    }
    # each of the 25 joints occupies 3 lines; line 0 of the frame is a header
    joints = pafs_frame[1:1+3*25]
    for i in range(25):
        # third line of each triple holds the values: 3 rows (x, y, conf) flattened
        value = np.fromstring(joints[3*i+2], sep=' ').reshape(3, -1).T
        # coordinates are stored normalized; scale to pixels
        value[:, 0] = value[:, 0] * W
        value[:, 1] = value[:, 1] * H
        res['joints'].append(value.astype(np.float32))
    # parse pafs
    pafs = pafs_frame[1+3*25+1:]
    for npart in range(26):
        # line 0 of each triple: the (src, dst) joint pair of this limb
        label = pafs[3*npart+0].split(' ')[2:]
        label = (int(label[0]), int(label[1]))
        # line 1: matrix shape (candidates of src x candidates of dst)
        shape = pafs[3*npart+1].split(' ')[2:]
        w, h = int(shape[0]), int(shape[1])
        # line 2: the affinity scores themselves
        value = np.fromstring(pafs[3*npart+2], sep=' ').reshape(w, h).astype(np.float32)
        res['pafs'][label] = value
    return res
def read_4dassociation(pafs, H, W):
    # Read a 4D-association paf text file and return a list with one parsed
    # frame dict (see parse_frames) per frame found in the file.
    outputs = []
    # parse the paf file
    with open(pafs, 'r') as f:
        pafs = f.readlines()
    # locate the (start, end) line index of every frame section
    indices = []
    for i, line in enumerate(pafs):
        if line.startswith('# newframes:'):
            indices.append([i])
        elif line.startswith('# end frames:'):
            indices[-1].append(i)
    print('[Read OpenPose] Totally {} frames'.format(len(indices)))
    for (start, end) in indices:
        pafs_frame = pafs[start+1:end]
        pafs_frame = list(map(lambda x:x.strip(), pafs_frame))
        frames = parse_frames(pafs_frame, H, W)
        outputs.append(frames)
    return outputs
13,001 | import os
from os.path import join
import numpy as np
import cv2
from easymocap.mytools.debug_utils import log, myerror, mywarn
def log(text):
    """Log *text* at info level via the project-wide printer."""
    # NOTE(review): this shadows the `log` imported from
    # easymocap.mytools.debug_utils above, and `myprint` is not defined in
    # this chunk — presumably a project-level helper; confirm the import.
    myprint(text, 'info')
def read_mv_images(root, root_images, ext, subs):
    """Collect synchronized multi-view image paths.

    Scans ``root/root_images/<sub>`` for every camera ``sub`` (auto-discovered
    when ``subs`` is empty, numerically sorted if the names are digits) and
    returns ``(frames, {'subs': subs})`` where ``frames[i]`` lists the i-th
    image of every view, truncated to the shortest view.
    """
    image_root = os.path.join(root, root_images)
    assert os.path.exists(image_root), f'root {root}/{root_images} not exists'
    if len(subs) == 0:
        subs = sorted(os.listdir(image_root))
        if subs[0].isdigit():
            subs = sorted(subs, key=lambda x: int(x))
    per_view = []
    log(f'Found {len(subs)} subjects in {root}/{root_images}')
    for sub in subs:
        names = sorted(os.listdir(os.path.join(image_root, sub)))
        files = [os.path.join(image_root, sub, name) for name in names if name.endswith(ext)]
        log(f'  -> Found {len(files)} {root_images} in {sub}.')
        per_view.append(files)
    min_length = min(len(files) for files in per_view)
    log(f'  -> Min length: {min_length}')
    frames = [[files[i] for files in per_view] for i in range(min_length)]
    return frames, {'subs': subs}
13,002 | import os
from os.path import join
import numpy as np
import cv2
from easymocap.mytools.debug_utils import log, myerror, mywarn
def FloatArray(x):
    """Cast to a float32 numpy array."""
    return np.array(x, dtype=np.float32)

def find_best_people(annots):
    """Pick one detection from a list of annotation dicts.

    Returns {} when the list is empty or the chosen detection lacks
    keypoints; otherwise {'bbox', 'keypoints'} as float32 arrays.
    """
    if not annots:
        return {}
    # TODO: actually select the best candidate rather than the first
    annot = annots[0]
    bbox = FloatArray(annot['bbox'])
    if 'keypoints' not in annot:
        return {}
    return {'bbox': bbox, 'keypoints': FloatArray(annot['keypoints'])}
13,003 | import os
from os.path import join
import numpy as np
import cv2
from easymocap.mytools.debug_utils import log, myerror, mywarn
def FloatArray(x):
    """Cast to a float32 numpy array."""
    return np.array(x, dtype=np.float32)

def find_all_people(annots):
    """Stack every detection's bbox and keypoints into float32 arrays;
    returns {} for an empty list."""
    if not annots:
        return {}
    boxes = FloatArray([annot['bbox'] for annot in annots])
    kpts = FloatArray([annot['keypoints'] for annot in annots])
    return {'bbox': boxes, 'keypoints': kpts}
13,004 | from tqdm import tqdm
import numpy as np
import os
from os.path import join
from glob import glob
from ..affinity.affinity import getDimGroups
from ..affinity.matchSVT import matchSVT
from ..mytools.reader import read_keypoints2d, read_keypoints3d
from ..mytools.file_utils import read_annot, read_json, save_annot, save_json, write_keypoints3d
def check_path(x):
    """Assert that the given filesystem path exists."""
    path_exists = os.path.exists(x)
    assert path_exists, '{} not exists!'.format(x)
13,005 | from termcolor import colored
import os
from os.path import join
import shutil
import subprocess
import time
import datetime
def toc():
    """Current wall-clock time in milliseconds."""
    return 1000 * time.time()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.