id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,300 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_conv(conv: Conv2d):
init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu") | null |
21,301 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,302 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_linear(linear: Linear):
init.normal_(linear.weight, 0, 0.01)
init.constant_(linear.bias, 0) | null |
21,303 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes the necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(num_classes: int = 1000, class_type: str = "single") -> ResNet` that solves the following problem:
Standard ResNet 18 implementation; the expected input shape is (B, 3, 224, 224). `num_classes` is the number of classes to classify; `class_type` is one of [single, multi] to support multi-class training (default: single). Returns the created ResNet Module.
Here is the function:
def resnet18(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 18 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # NOTE(review): fused dataset-column residue on the original closing
    # line (a "| ... |" tail) made the function a syntax error; removed.
    # ResNet-18 layout: two basic blocks per stage, 64 -> 128 -> 256 -> 512
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=2, in_channels=64, out_channels=64, downsample=False
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=64, out_channels=128, downsample=True
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=128, out_channels=256, downsample=True
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=256, out_channels=512, downsample=True
        ),
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,304 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnetv2_18` function. Write a Python function `def resnetv2_18(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet V2 18 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnetv2_18(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet V2 18 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # NOTE(review): fused dataset-column residue on the original closing
    # line (a "| ... |" tail) made the function a syntax error; removed.
    # Same layout as ResNet-18 but with version=2 (pre-activation) blocks
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=2, in_channels=64, out_channels=64, downsample=False, version=2
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=64, out_channels=128, downsample=True, version=2
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=128, out_channels=256, downsample=True, version=2
        ),
        ResNetSectionSettings(
            num_blocks=2, in_channels=256, out_channels=512, downsample=True, version=2
        ),
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,305 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet 34 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet34(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 34 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # NOTE(review): fused dataset-column residue on the original closing
    # line (a "| ... |" tail) made the function a syntax error; removed.
    # ResNet-34 layout: 3/4/6/3 basic blocks, 64 -> 128 -> 256 -> 512
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=3, in_channels=64, out_channels=64, downsample=False
        ),
        ResNetSectionSettings(
            num_blocks=4, in_channels=64, out_channels=128, downsample=True
        ),
        ResNetSectionSettings(
            num_blocks=6, in_channels=128, out_channels=256, downsample=True
        ),
        ResNetSectionSettings(
            num_blocks=3, in_channels=256, out_channels=512, downsample=True
        ),
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,306 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnetv2_34` function. Write a Python function `def resnetv2_34(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet V2 34 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnetv2_34(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet V2 34 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # NOTE(review): fused dataset-column residue on the original closing
    # line (a "| ... |" tail) made the function a syntax error; removed.
    # Same layout as ResNet-34 but with version=2 (pre-activation) blocks
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=3, in_channels=64, out_channels=64, downsample=False, version=2
        ),
        ResNetSectionSettings(
            num_blocks=4, in_channels=64, out_channels=128, downsample=True, version=2
        ),
        ResNetSectionSettings(
            num_blocks=6, in_channels=128, out_channels=256, downsample=True, version=2
        ),
        ResNetSectionSettings(
            num_blocks=3, in_channels=256, out_channels=512, downsample=True, version=2
        ),
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,307 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet 50 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet50(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 50 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # NOTE(review): fused dataset-column residue on the original closing
    # line (a "| ... |" tail) made the function a syntax error; removed.
    # ResNet-50 layout: 3/4/6/3 bottleneck blocks with 4x channel expansion
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=3,
            in_channels=64,
            out_channels=256,
            downsample=False,
            proj_channels=64,
        ),
        ResNetSectionSettings(
            num_blocks=4,
            in_channels=256,
            out_channels=512,
            downsample=True,
            proj_channels=128,
        ),
        ResNetSectionSettings(
            num_blocks=6,
            in_channels=512,
            out_channels=1024,
            downsample=True,
            proj_channels=256,
        ),
        ResNetSectionSettings(
            num_blocks=3,
            in_channels=1024,
            out_channels=2048,
            downsample=True,
            proj_channels=512,
        ),
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,308 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Configuration for a single section (stage) of a ResNet-style network.

    :param num_blocks: how many residual blocks the section contains
        (Basic or Bottleneck blocks)
    :param in_channels: number of channels fed into the section
    :param out_channels: number of channels produced by the section
    :param downsample: True to downsample the input with a stride of 2,
        False to keep the spatial resolution
    :param proj_channels: channel count of the bottleneck projection;
        a value < 0 selects the basic (non-bottleneck) block
    :param groups: group count for each 3x3 convolution (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for the original ResNet, 2 for ResNet v2
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        # squeeze excite is not implemented; fail fast instead of
        # silently ignoring the flag
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        # only the original (v1) and pre-activation (v2) variants exist
        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the input image batch
        :return: tuple of (logits, classes) produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one section (stage) of the network from its settings.

        Fix: declared as a ``@staticmethod``; the original omitted the
        decorator and only worked because it was invoked through the class
        (``ResNet.create_section``) — calling it on an instance would have
        passed ``self`` as ``settings``.

        :param settings: the settings describing the section to build
        :return: a Sequential containing the section's residual blocks
        """
        blocks = []
        in_channels = settings.in_channels
        # only the first block of a section downsamples; the rest use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnetv2_50` function. Write a Python function `def resnetv2_50(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet V2 50 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnetv2_50(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet V2 50 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # Per-stage spec: (num_blocks, in_channels, out_channels, downsample, proj_channels)
    stage_specs = [
        (3, 64, 256, False, 64),
        (4, 256, 512, True, 128),
        (6, 512, 1024, True, 256),
        (3, 1024, 2048, True, 512),
    ]
    # version=2 selects the pre-activation (ResNet V2) bottleneck blocks
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_ch,
            out_channels=out_ch,
            downsample=down,
            proj_channels=proj,
            version=2,
        )
        for blocks, in_ch, out_ch, down, proj in stage_specs
    ]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,309 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet50_2xwidth` function. Write a Python function `def resnet50_2xwidth(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
ResNet 50 implementation where channel sizes for 3x3 convolutions are doubled; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet50_2xwidth(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    ResNet 50 implementation where channel sizes for 3x3 convolutions are doubled;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    sec_settings = []
    in_channels = 64
    # Per-stage (num_blocks, out_channels, proj_channels); projection widths are
    # 2x those of the standard ResNet 50 bottlenecks.
    for stage, (blocks, out_channels, proj) in enumerate(
        [(3, 256, 128), (4, 512, 256), (6, 1024, 512), (3, 2048, 1024)]
    ):
        sec_settings.append(
            ResNetSectionSettings(
                num_blocks=blocks,
                in_channels=in_channels,
                out_channels=out_channels,
                downsample=stage > 0,  # every stage after the first downsamples
                proj_channels=proj,
            )
        )
        in_channels = out_channels  # next stage consumes this stage's output
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,310 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnext50` function. Write a Python function `def resnext50(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNext 50 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnext50(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNext 50 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """

    def _stage(blocks, in_ch, out_ch, proj, first=False):
        # Every ResNext stage uses 32 cardinality groups for its 3x3 convs;
        # only the first stage skips the stride-2 downsample.
        return ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_ch,
            out_channels=out_ch,
            downsample=not first,
            proj_channels=proj,
            groups=32,
        )

    sec_settings = [
        _stage(3, 64, 256, 128, first=True),
        _stage(4, 256, 512, 256),
        _stage(6, 512, 1024, 512),
        _stage(3, 1024, 2048, 1024),
    ]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,311 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet101` function. Write a Python function `def resnet101(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet 101 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet101(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 101 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    sec_settings = []
    channels = 64  # stem output feeding the first stage
    for stage_index, blocks in enumerate((3, 4, 23, 3)):
        # Stage widths double each stage: 256 -> 512 -> 1024 -> 2048; the
        # bottleneck projection is the standard 1/4 of the stage output.
        out_channels = 256 * (2 ** stage_index)
        sec_settings.append(
            ResNetSectionSettings(
                num_blocks=blocks,
                in_channels=channels,
                out_channels=out_channels,
                downsample=stage_index != 0,
                proj_channels=out_channels // 4,
            )
        )
        channels = out_channels
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,312 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnetv2_101` function. Write a Python function `def resnetv2_101(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet V2 101 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnetv2_101(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet V2 101 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # Stage layout matches ResNet 101 (3, 4, 23, 3 blocks); version=2 selects
    # the pre-activation (ResNet V2) block variants.
    stage_kwargs = [
        dict(num_blocks=3, in_channels=64, out_channels=256,
             downsample=False, proj_channels=64),
        dict(num_blocks=4, in_channels=256, out_channels=512,
             downsample=True, proj_channels=128),
        dict(num_blocks=23, in_channels=512, out_channels=1024,
             downsample=True, proj_channels=256),
        dict(num_blocks=3, in_channels=1024, out_channels=2048,
             downsample=True, proj_channels=512),
    ]
    sec_settings = [ResNetSectionSettings(version=2, **kw) for kw in stage_kwargs]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,313 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet101_2xwidth` function. Write a Python function `def resnet101_2xwidth(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
ResNet 101 implementation where channel sizes for 3x3 convolutions are doubled; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet101_2xwidth(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    ResNet 101 implementation where channel sizes for 3x3 convolutions are doubled;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    block_counts = (3, 4, 23, 3)
    in_sizes = (64, 256, 512, 1024)
    out_sizes = (256, 512, 1024, 2048)
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=n_blocks,
            in_channels=in_ch,
            out_channels=out_ch,
            downsample=stage > 0,  # only the first stage keeps full resolution
            proj_channels=out_ch // 2,  # doubled vs standard ResNet 101 (out/4)
        )
        for stage, (n_blocks, in_ch, out_ch) in enumerate(
            zip(block_counts, in_sizes, out_sizes)
        )
    ]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,314 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
"""
Settings to describe how to put together a ResNet based architecture
using user supplied configurations.
:param num_blocks: the number of blocks to put in the section
(ie Basic or Bottleneck blocks)
:param in_channels: the number of input channels to the section
:param out_channels: the number of output channels from the section
:param downsample: True to apply stride 2 for downsampling of the input,
False otherwise
:param proj_channels: The number of channels in the projection for a
bottleneck block, if < 0 then uses basic
:param groups: The number of groups to use for each 3x3 conv (ResNext)
:param use_se: True to use squeeze excite, False otherwise
:param version: 1 for original ResNet model, 2 for ResNet v2 model
"""
def __init__(
self,
num_blocks: int,
in_channels: int,
out_channels: int,
downsample: bool,
proj_channels: int = -1,
groups: int = 1,
use_se: bool = False,
version: int = 1,
):
if use_se:
# TODO: add support for squeeze excite
raise NotImplementedError("squeeze excite not supported yet")
if version != 1 and version != 2:
raise ValueError(
"unknown version given of {}, only 1 and 2 are supported".format(
version
)
)
self.num_blocks = num_blocks
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.proj_channels = proj_channels
self.groups = groups
self.use_se = use_se
self.version = version
class ResNet(Module):
"""
ResNet, ResNet V2, ResNext implementations.
:param sec_settings: the settings for each section in the ResNet model
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
"""
def __init__(
self,
sec_settings: List[ResNetSectionSettings],
num_classes: int,
class_type: str,
):
super().__init__()
self.input = _Input()
self.sections = Sequential(
*[ResNet.create_section(settings) for settings in sec_settings]
)
self.classifier = _Classifier(
sec_settings[-1].out_channels, num_classes, class_type
)
def forward(self, inp: Tensor):
out = self.input(inp)
out = self.sections(out)
logits, classes = self.classifier(out)
return logits, classes
def create_section(settings: ResNetSectionSettings) -> Sequential:
blocks = []
in_channels = settings.in_channels
stride = 2 if settings.downsample else 1
for _ in range(settings.num_blocks):
if settings.proj_channels > 0 and settings.version == 1:
blocks.append(
_BottleneckBlock(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.proj_channels > 0 and settings.version == 2:
blocks.append(
_BottleneckBlockV2(
in_channels,
settings.out_channels,
settings.proj_channels,
stride,
settings.groups,
)
)
elif settings.version == 1:
blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
elif settings.version == 2:
blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
else:
raise ValueError(
"could not figure out which block to use given "
"version:{} and proj_channels:{}".format(
settings.version, settings.proj_channels
)
)
in_channels = settings.out_channels
stride = 1
return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnext101` function. Write a Python function `def resnext101(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNext 101 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnext101(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNext 101 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample, proj_channels)
    layout = [
        (3, 64, 256, False, 128),
        (4, 256, 512, True, 256),
        (23, 512, 1024, True, 512),
        (3, 1024, 2048, True, 1024),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            downsample=down,
            proj_channels=proj,
            groups=32,
        )
        for blocks, in_chan, out_chan, down, proj in layout
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,315 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Settings to describe how to put together a ResNet based architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section
        (ie Basic or Bottleneck blocks)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param downsample: True to apply stride 2 for downsampling of the input,
        False otherwise
    :param proj_channels: The number of channels in the projection for a
        bottleneck block, if < 0 then uses basic
    :param groups: The number of groups to use for each 3x3 conv (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for original ResNet model, 2 for ResNet v2 model
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as ResNet.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one sequential section of residual blocks from the given settings.

        :param settings: the description of the section to construct
        :return: a Sequential of the created residual blocks
        """
        blocks = []

        in_channels = settings.in_channels
        # only the first block of a section downsamples; later blocks use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                # defensive: settings validation should make this unreachable
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnet152` function. Write a Python function `def resnet152(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet 152 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnet152(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 152 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample, proj_channels)
    layout = [
        (3, 64, 256, False, 64),
        (8, 256, 512, True, 128),
        (36, 512, 1024, True, 256),
        (3, 1024, 2048, True, 512),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            downsample=down,
            proj_channels=proj,
        )
        for blocks, in_chan, out_chan, down, proj in layout
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,316 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Settings to describe how to put together a ResNet based architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section
        (ie Basic or Bottleneck blocks)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param downsample: True to apply stride 2 for downsampling of the input,
        False otherwise
    :param proj_channels: The number of channels in the projection for a
        bottleneck block, if < 0 then uses basic
    :param groups: The number of groups to use for each 3x3 conv (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for original ResNet model, 2 for ResNet v2 model
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as ResNet.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one sequential section of residual blocks from the given settings.

        :param settings: the description of the section to construct
        :return: a Sequential of the created residual blocks
        """
        blocks = []

        in_channels = settings.in_channels
        # only the first block of a section downsamples; later blocks use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                # defensive: settings validation should make this unreachable
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnetv2_152` function. Write a Python function `def resnetv2_152(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNet V2 152 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnetv2_152(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet V2 152 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample, proj_channels)
    layout = [
        (3, 64, 256, False, 64),
        (8, 256, 512, True, 128),
        (36, 512, 1024, True, 256),
        (3, 1024, 2048, True, 512),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            downsample=down,
            proj_channels=proj,
            version=2,
        )
        for blocks, in_chan, out_chan, down, proj in layout
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,317 | from typing import List
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class ResNetSectionSettings(object):
    """
    Settings to describe how to put together a ResNet based architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section
        (ie Basic or Bottleneck blocks)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param downsample: True to apply stride 2 for downsampling of the input,
        False otherwise
    :param proj_channels: The number of channels in the projection for a
        bottleneck block, if < 0 then uses basic
    :param groups: The number of groups to use for each 3x3 conv (ResNext)
    :param use_se: True to use squeeze excite, False otherwise
    :param version: 1 for original ResNet model, 2 for ResNet v2 model
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        downsample: bool,
        proj_channels: int = -1,
        groups: int = 1,
        use_se: bool = False,
        version: int = 1,
    ):
        if use_se:
            # TODO: add support for squeeze excite
            raise NotImplementedError("squeeze excite not supported yet")

        if version not in (1, 2):
            raise ValueError(
                "unknown version given of {}, only 1 and 2 are supported".format(
                    version
                )
            )

        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.proj_channels = proj_channels
        self.groups = groups
        self.use_se = use_se
        self.version = version
class ResNet(Module):
    """
    ResNet, ResNet V2, ResNext implementations.

    :param sec_settings: the settings for each section in the ResNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[ResNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[ResNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp: Tensor):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.input(inp)
        out = self.sections(out)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as ResNet.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: ResNetSectionSettings) -> Sequential:
        """
        Build one sequential section of residual blocks from the given settings.

        :param settings: the description of the section to construct
        :return: a Sequential of the created residual blocks
        """
        blocks = []

        in_channels = settings.in_channels
        # only the first block of a section downsamples; later blocks use stride 1
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            if settings.proj_channels > 0 and settings.version == 1:
                blocks.append(
                    _BottleneckBlock(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.proj_channels > 0 and settings.version == 2:
                blocks.append(
                    _BottleneckBlockV2(
                        in_channels,
                        settings.out_channels,
                        settings.proj_channels,
                        stride,
                        settings.groups,
                    )
                )
            elif settings.version == 1:
                blocks.append(_BasicBlock(in_channels, settings.out_channels, stride))
            elif settings.version == 2:
                blocks.append(_BasicBlockV2(in_channels, settings.out_channels, stride))
            else:
                # defensive: settings validation should make this unreachable
                raise ValueError(
                    "could not figure out which block to use given "
                    "version:{} and proj_channels:{}".format(
                        settings.version, settings.proj_channels
                    )
                )

            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
key=["resnet18", "resnet_18", "resnet-18", "resnetv1_18", "resnetv1-18"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="resnet_v1",
sub_architecture="18",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `resnext152` function. Write a Python function `def resnext152(num_classes: int = 1000, class_type: str = "single") -> ResNet` to solve the following problem:
Standard ResNext 152 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created ResNet Module
Here is the function:
def resnext152(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNext 152 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample, proj_channels)
    layout = [
        (3, 64, 256, False, 128),
        (8, 256, 512, True, 256),
        (36, 512, 1024, True, 512),
        (3, 1024, 2048, True, 1024),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            downsample=down,
            proj_channels=proj,
            groups=32,
        )
        for blocks, in_chan, out_chan, down, proj in layout
    ]

    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,318 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_conv(conv: Conv2d):
init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu")
if conv.bias is not None:
init.constant_(conv.bias, 0) | null |
21,319 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_batch_norm(norm: BatchNorm2d):
init.constant_(norm.weight, 1.0)
init.constant_(norm.bias, 0.0) | null |
21,320 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_linear(linear: Linear):
init.normal_(linear.weight, 0, 0.01)
init.constant_(linear.bias, 0) | null |
21,321 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        use_batchnorm: bool,
    ):
        # plain value holder; no validation is performed here
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.sections(inp)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as VGG.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: VGGSectionSettings) -> Sequential:
        """
        Build one section: num_blocks conv (+ optional batchnorm) relu blocks
        followed by a 2x2 max pool.

        :param settings: the description of the section to construct
        :return: a Sequential of the created blocks
        """
        blocks = []
        in_channels = settings.in_channels

        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels

        # halve the spatial resolution at the end of every section
        blocks.append(MaxPool2d(kernel_size=2, stride=2))

        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg11` function. Write a Python function `def vgg11(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
Standard VGG 11; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def vgg11(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    Standard VGG 11; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created VGG Module
    """
    # (num_blocks, in_channels, out_channels) for each of the five sections
    layout = [(1, 3, 64), (1, 64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512)]
    sec_settings = [
        VGGSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            use_batchnorm=False,
        )
        for blocks, in_chan, out_chan in layout
    ]

    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,322 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        use_batchnorm: bool,
    ):
        # plain value holder; no validation is performed here
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.sections(inp)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as VGG.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: VGGSectionSettings) -> Sequential:
        """
        Build one section: num_blocks conv (+ optional batchnorm) relu blocks
        followed by a 2x2 max pool.

        :param settings: the description of the section to construct
        :return: a Sequential of the created blocks
        """
        blocks = []
        in_channels = settings.in_channels

        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels

        # halve the spatial resolution at the end of every section
        blocks.append(MaxPool2d(kernel_size=2, stride=2))

        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg11bn` function. Write a Python function `def vgg11bn(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
VGG 11 with batch norm added; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def vgg11bn(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    VGG 11 with batch norm added; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created VGG Module
    """
    # (num_blocks, in_channels, out_channels) for each of the five sections
    layout = [(1, 3, 64), (1, 64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512)]
    sec_settings = [
        VGGSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            use_batchnorm=True,
        )
        for blocks, in_chan, out_chan in layout
    ]

    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,323 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        use_batchnorm: bool,
    ):
        # plain value holder; no validation is performed here
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.sections(inp)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as VGG.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: VGGSectionSettings) -> Sequential:
        """
        Build one section: num_blocks conv (+ optional batchnorm) relu blocks
        followed by a 2x2 max pool.

        :param settings: the description of the section to construct
        :return: a Sequential of the created blocks
        """
        blocks = []
        in_channels = settings.in_channels

        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels

        # halve the spatial resolution at the end of every section
        blocks.append(MaxPool2d(kernel_size=2, stride=2))

        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg13` function. Write a Python function `def vgg13(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
Standard VGG 13; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def vgg13(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    Standard VGG 13; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created VGG Module
    """
    # (num_blocks, in_channels, out_channels) for each of the five sections
    layout = [(2, 3, 64), (2, 64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512)]
    sec_settings = [
        VGGSectionSettings(
            num_blocks=blocks,
            in_channels=in_chan,
            out_channels=out_chan,
            use_batchnorm=False,
        )
        for blocks, in_chan, out_chan in layout
    ]

    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,324 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        use_batchnorm: bool,
    ):
        # plain value holder; no validation is performed here
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        """
        :param inp: the image batch to classify
        :return: (logits, classes) as produced by the classifier head
        """
        out = self.sections(inp)
        logits, classes = self.classifier(out)

        return logits, classes

    # fix: this is invoked as VGG.create_section(...) and takes no self;
    # without @staticmethod an instance-bound call would misbind settings to self
    @staticmethod
    def create_section(settings: VGGSectionSettings) -> Sequential:
        """
        Build one section: num_blocks conv (+ optional batchnorm) relu blocks
        followed by a 2x2 max pool.

        :param settings: the description of the section to construct
        :return: a Sequential of the created blocks
        """
        blocks = []
        in_channels = settings.in_channels

        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels

        # halve the spatial resolution at the end of every section
        blocks.append(MaxPool2d(kernel_size=2, stride=2))

        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg13bn` function. Write a Python function `def vgg13bn(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
VGG 13 with batch norm added; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created VGG Module
Here is the function:
def vgg13bn(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    VGG 13 with batch norm added; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created VGG Module
    """
    # VGG 13 layout: five sections of two conv blocks each, with the
    # standard channel progression 3 -> 64 -> 128 -> 256 -> 512 -> 512.
    channels = (3, 64, 128, 256, 512, 512)
    blocks_per_section = (2, 2, 2, 2, 2)
    sec_settings = [
        VGGSectionSettings(
            num_blocks=count,
            in_channels=chan_in,
            out_channels=chan_out,
            use_batchnorm=True,
        )
        for count, chan_in, chan_out in zip(
            blocks_per_section, channels[:-1], channels[1:]
        )
    ]
    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,325 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self, num_blocks: int, in_channels: int, out_channels: int, use_batchnorm: bool
    ):
        # Plain value holder: these attributes are read back by
        # VGG.create_section when assembling each conv section.
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        # Feature extractor: one Sequential per section setting, chained.
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        # _Classifier is a private helper defined elsewhere in this module;
        # it consumes the channel count of the final section.
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        # Returns both raw logits and the classifier's post-activation
        # class scores.
        out = self.sections(inp)
        logits, classes = self.classifier(out)
        return logits, classes

    def create_section(settings: VGGSectionSettings) -> Sequential:
        # NOTE(review): takes `settings`, not `self` — presumably decorated
        # with @staticmethod in the full source; confirm before calling on
        # an instance.
        blocks = []
        in_channels = settings.in_channels
        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels
        # every section ends with a 2x2 stride-2 max pool
        blocks.append(MaxPool2d(kernel_size=2, stride=2))
        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg16` function. Write a Python function `def vgg16(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
Standard VGG 16; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created VGG Module
Here is the function:
def vgg16(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    Standard VGG 16; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created VGG Module
    """
    # VGG 16 layout: (2, 2, 3, 3, 3) conv blocks per section with the
    # standard channel progression 3 -> 64 -> 128 -> 256 -> 512 -> 512.
    channels = (3, 64, 128, 256, 512, 512)
    blocks_per_section = (2, 2, 3, 3, 3)
    sec_settings = [
        VGGSectionSettings(
            num_blocks=count,
            in_channels=chan_in,
            out_channels=chan_out,
            use_batchnorm=False,
        )
        for count, chan_in, chan_out in zip(
            blocks_per_section, channels[:-1], channels[1:]
        )
    ]
    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,326 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self, num_blocks: int, in_channels: int, out_channels: int, use_batchnorm: bool
    ):
        # Plain value holder: these attributes are read back by
        # VGG.create_section when assembling each conv section.
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        # Feature extractor: one Sequential per section setting, chained.
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        # _Classifier is a private helper defined elsewhere in this module;
        # it consumes the channel count of the final section.
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        # Returns both raw logits and the classifier's post-activation
        # class scores.
        out = self.sections(inp)
        logits, classes = self.classifier(out)
        return logits, classes

    def create_section(settings: VGGSectionSettings) -> Sequential:
        # NOTE(review): takes `settings`, not `self` — presumably decorated
        # with @staticmethod in the full source; confirm before calling on
        # an instance.
        blocks = []
        in_channels = settings.in_channels
        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels
        # every section ends with a 2x2 stride-2 max pool
        blocks.append(MaxPool2d(kernel_size=2, stride=2))
        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg16bn` function. Write a Python function `def vgg16bn(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
VGG 16 with batch norm added; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created VGG Module
Here is the function:
def vgg16bn(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    VGG 16 with batch norm added; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created VGG Module
    """
    # Same section layout as vgg16, with batch norm enabled in every block.
    channels = (3, 64, 128, 256, 512, 512)
    blocks_per_section = (2, 2, 3, 3, 3)
    sec_settings = [
        VGGSectionSettings(
            num_blocks=count,
            in_channels=chan_in,
            out_channels=chan_out,
            use_batchnorm=True,
        )
        for count, chan_in, chan_out in zip(
            blocks_per_section, channels[:-1], channels[1:]
        )
    ]
    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,327 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self, num_blocks: int, in_channels: int, out_channels: int, use_batchnorm: bool
    ):
        # Plain value holder: these attributes are read back by
        # VGG.create_section when assembling each conv section.
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        # Feature extractor: one Sequential per section setting, chained.
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        # _Classifier is a private helper defined elsewhere in this module;
        # it consumes the channel count of the final section.
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        # Returns both raw logits and the classifier's post-activation
        # class scores.
        out = self.sections(inp)
        logits, classes = self.classifier(out)
        return logits, classes

    def create_section(settings: VGGSectionSettings) -> Sequential:
        # NOTE(review): takes `settings`, not `self` — presumably decorated
        # with @staticmethod in the full source; confirm before calling on
        # an instance.
        blocks = []
        in_channels = settings.in_channels
        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels
        # every section ends with a 2x2 stride-2 max pool
        blocks.append(MaxPool2d(kernel_size=2, stride=2))
        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg19` function. Write a Python function `def vgg19(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
Standard VGG 19; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created VGG Module
Here is the function:
def vgg19(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    Standard VGG 19; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created VGG Module
    """
    # VGG 19 layout: (2, 2, 4, 4, 4) conv blocks per section with the
    # standard channel progression 3 -> 64 -> 128 -> 256 -> 512 -> 512.
    channels = (3, 64, 128, 256, 512, 512)
    blocks_per_section = (2, 2, 4, 4, 4)
    sec_settings = [
        VGGSectionSettings(
            num_blocks=count,
            in_channels=chan_in,
            out_channels=chan_out,
            use_batchnorm=False,
        )
        for count, chan_in, chan_out in zip(
            blocks_per_section, channels[:-1], channels[1:]
        )
    ]
    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,328 | from typing import List
from torch import Tensor
from torch.nn import (
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class VGGSectionSettings(object):
    """
    Settings to describe how to put together a VGG architecture
    using user supplied configurations.

    :param num_blocks: the number of blocks to put in the section (conv [bn] relu)
    :param in_channels: the number of input channels to the section
    :param out_channels: the number of output channels from the section
    :param use_batchnorm: True to put batchnorm after each conv, False otherwise
    """

    def __init__(
        self, num_blocks: int, in_channels: int, out_channels: int, use_batchnorm: bool
    ):
        # Plain value holder: these attributes are read back by
        # VGG.create_section when assembling each conv section.
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_batchnorm = use_batchnorm
class VGG(Module):
    """
    VGG implementation

    :param sec_settings: the settings for each section in the vgg model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[VGGSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super(VGG, self).__init__()
        # Feature extractor: one Sequential per section setting, chained.
        self.sections = Sequential(
            *[VGG.create_section(settings) for settings in sec_settings]
        )
        # _Classifier is a private helper defined elsewhere in this module;
        # it consumes the channel count of the final section.
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )

    def forward(self, inp):
        # Returns both raw logits and the classifier's post-activation
        # class scores.
        out = self.sections(inp)
        logits, classes = self.classifier(out)
        return logits, classes

    def create_section(settings: VGGSectionSettings) -> Sequential:
        # NOTE(review): takes `settings`, not `self` — presumably decorated
        # with @staticmethod in the full source; confirm before calling on
        # an instance.
        blocks = []
        in_channels = settings.in_channels
        for _ in range(settings.num_blocks):
            blocks.append(
                _Block(in_channels, settings.out_channels, settings.use_batchnorm)
            )
            in_channels = settings.out_channels
        # every section ends with a 2x2 stride-2 max pool
        blocks.append(MaxPool2d(kernel_size=2, stride=2))
        return Sequential(*blocks)
key=["vgg11", "vgg_11", "vgg-11"],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="vgg",
sub_architecture="11",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.mlp.6.weight", "classifier.mlp.6.bias"],
The provided code snippet includes necessary dependencies for implementing the `vgg19bn` function. Write a Python function `def vgg19bn(num_classes: int = 1000, class_type: str = "single") -> VGG` to solve the following problem:
VGG 19 with batch norm added; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created VGG Module
Here is the function:
def vgg19bn(num_classes: int = 1000, class_type: str = "single") -> VGG:
    """
    VGG 19 with batch norm added; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created VGG Module
    """
    # Same section layout as vgg19, with batch norm enabled in every block.
    channels = (3, 64, 128, 256, 512, 512)
    blocks_per_section = (2, 2, 4, 4, 4)
    sec_settings = [
        VGGSectionSettings(
            num_blocks=count,
            in_channels=chan_in,
            out_channels=chan_out,
            use_batchnorm=True,
        )
        for count, chan_in, chan_out in zip(
            blocks_per_section, channels[:-1], channels[1:]
        )
    ]
    return VGG(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
21,329 | from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class MnistNet(Module):
    """
    A simple convolutional model created for the MNIST dataset

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        num_classes: int = 10,
        class_type: str = "single",
    ):
        super().__init__()
        # Four conv blocks (channels 1 -> 16 -> 32 -> 64 -> 128); the two
        # stride-2 blocks halve the spatial dimensions twice.
        # _ConvBNRelu and _Classifier are private helpers defined elsewhere
        # in this module.
        self.blocks = Sequential(
            _ConvBNRelu(
                in_channels=1, out_channels=16, kernel_size=5, padding=2, stride=1
            ),
            _ConvBNRelu(
                in_channels=16, out_channels=32, kernel_size=5, padding=2, stride=2
            ),
            _ConvBNRelu(
                in_channels=32, out_channels=64, kernel_size=5, padding=2, stride=1
            ),
            _ConvBNRelu(
                in_channels=64, out_channels=128, kernel_size=5, padding=2, stride=2
            ),
        )
        self.classifier = _Classifier(
            in_channels=128, classes=num_classes, class_type=class_type
        )

    def forward(self, inp: Tensor):
        # Returns both raw logits and the classifier's post-activation
        # class scores.
        out = self.blocks(inp)
        logits, classes = self.classifier(out)
        return logits, classes
key=["mnistnet"],
input_shape=(1, 28, 28),
domain="cv",
sub_domain="classification",
architecture="mnistnet",
sub_architecture=None,
default_dataset="mnist",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `mnist_net` function. Write a Python function `def mnist_net(num_classes: int = 10, class_type: str = "single") -> MnistNet` to solve the following problem:
MnistNet implementation; expected input shape is (B, 1, 28, 28) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MnistNet Module
Here is the function:
def mnist_net(num_classes: int = 10, class_type: str = "single") -> MnistNet:
    """
    MnistNet implementation; expected input shape is (B, 1, 28, 28)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: the created MnistNet Module
    """
    # Thin factory kept for registry symmetry with the other model builders.
    return MnistNet(num_classes=num_classes, class_type=class_type)
21,330 | import collections
import json
import logging
import os
import shutil
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy
import onnx
import torch
from onnx import numpy_helper
from packaging import version
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.exporters.onnx_to_deepsparse import ONNXToDeepsparse
from sparseml.onnx.utils import ONNXGraph
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils.helpers import (
adjust_quantization_for_onnx_export,
tensors_export,
tensors_module_forward,
tensors_to_device,
)
from sparseml.pytorch.utils.model import (
is_parallel_model,
save_model,
script_model,
trace_model,
)
from sparseml.utils import clean_path, create_parent_dirs
from sparsezoo.utils import save_onnx, validate_onnx
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
def _flatten_qparams(model: onnx.ModelProto):
    """
    Reshape scalar quantization parameters stored as shape ``(1,)``
    initializers into true rank-0 scalars of shape ``()``, in place.
    """
    # transforms any QuantizeLinear/DequantizeLinear that have
    # zero_point/scale with shapes `(1,)` into shape `()`
    graph = ONNXGraph(model)
    inits_to_flatten = set()
    for node in model.graph.node:
        if node.op_type in ["QuantizeLinear", "DequantizeLinear"]:
            # scale is required if the input is an initializer
            scale_init = graph.get_init_by_name(node.input[1])
            if scale_init is not None and list(scale_init.dims) == [1]:
                inits_to_flatten.add(node.input[1])
                # zero_point is optional AND shape must match
                # scale. so if scale is (1,), then so will zero point
                if len(node.input) == 3:
                    inits_to_flatten.add(node.input[2])

    for i, init in enumerate(model.graph.initializer):
        if init.name not in inits_to_flatten:
            continue
        # rebuild the flagged initializer as a rank-0 tensor of same dtype
        a = numpy_helper.to_array(init)
        assert a.shape == (1,)
        b = numpy.array(a[0])
        assert b.shape == ()
        assert b.dtype == a.dtype
        model.graph.initializer[i].CopyFrom(numpy_helper.from_array(b, name=init.name))
def _fold_identity_initializers(model: onnx.ModelProto):
    """
    Remove ``Identity`` nodes whose single input is an initializer by
    rewiring each consumer directly to that initializer, in place.
    """
    # folds any Identity nodes that have a single input (which is an initializer)
    # and a single output
    matches = []
    graph = ONNXGraph(model)

    def is_match(node: onnx.NodeProto) -> bool:
        # NOTE(review): reaches into ONNXGraph's private
        # `_name_to_initializer` mapping — confirm no public accessor exists.
        return (
            node.op_type == "Identity"
            and len(node.input) == 1
            and len(node.output) == 1
            and node.input[0] in graph._name_to_initializer
        )

    for node in model.graph.node:
        if not is_match(node):
            continue
        matches.append(node)

        # find any node in the graph that uses the output of `node`
        # as an input. replace the input with `node`'s input
        for other in graph.get_node_children(node):
            for i, other_input_i in enumerate(other.input):
                # NOTE: this just replaces the str ids
                if other_input_i == node.output[0]:
                    other.input[i] = node.input[0]

    # delete only after rewiring, so children lookups stay valid above
    for node in matches:
        model.graph.node.remove(node)
def _get_output_names(out: Any):
"""
Get name of output tensors
:param out: outputs of the model
:return: list of names
"""
output_names = None
if isinstance(out, Tensor):
output_names = ["output"]
elif hasattr(out, "keys") and callable(out.keys):
output_names = list(out.keys())
elif isinstance(out, Iterable):
output_names = ["output_{}".format(index) for index, _ in enumerate(iter(out))]
return output_names
def _wrap_batch_norms(module: Module) -> bool:
    """
    Wrap every BatchNorm1d/2d/3d submodule of ``module`` in a trivial
    wrapper (``_AddNoOpWrapper``, defined elsewhere in this module) to
    prevent batch norm fusing during export.

    :return: True if at least one batch norm layer was wrapped
    """
    # wrap all batch norm layers in module with a trivial wrapper
    # to prevent BN fusing during export
    batch_norms_wrapped = False
    for name, submodule in module.named_modules():
        if (
            isinstance(submodule, torch.nn.BatchNorm1d)
            or isinstance(submodule, torch.nn.BatchNorm2d)
            or isinstance(submodule, torch.nn.BatchNorm3d)
        ):
            # replace the attribute on the direct parent so the wrapper sits
            # exactly where the original layer was in the module tree
            submodule_path = name.split(".")
            parent_module = _get_submodule(module, submodule_path[:-1])
            setattr(parent_module, submodule_path[-1], _AddNoOpWrapper(submodule))
            batch_norms_wrapped = True
    return batch_norms_wrapped
def _delete_trivial_onnx_adds(model: onnx.ModelProto):
    """
    Remove ``Add`` nodes whose second input is a constant tensor of all
    zeros, splicing the parent node's output straight through, in place.
    Best-effort: malformed candidates are skipped, not raised on.
    """
    # delete all add nodes in the graph with second inputs as constant nodes set to 0
    add_nodes = [node for node in model.graph.node if node.op_type == "Add"]
    for add_node in add_nodes:
        try:
            # constant node producing the Add's second input
            add_const_node = [
                node for node in model.graph.node if node.output[0] == add_node.input[1]
            ][0]
            add_const_val = numpy_helper.to_array(add_const_node.attribute[0].t)
            if numpy.all(add_const_val == 0.0):
                # update graph edges
                parent_node = [
                    node
                    for node in model.graph.node
                    if add_node.input[0] in node.output
                ]
                if not parent_node:
                    continue
                parent_node[0].output[0] = add_node.output[0]
                # remove node and constant
                model.graph.node.remove(add_node)
                model.graph.node.remove(add_const_node)
        except Exception:  # skip node on any error
            # deliberate best-effort pass: leave the candidate node intact
            continue
def _unwrap_batchnorms(model: onnx.ModelProto):
    """
    Strip the ``.bn_wrapper_replace_me`` marker from every initializer,
    node input, and node output name, then validate the resulting model.
    The marker is presumably introduced by the batch-norm no-op wrapping
    step before export — confirm against the wrapper definition.
    """
    for init in model.graph.initializer:
        init.name = init.name.replace(".bn_wrapper_replace_me", "")
    for node in model.graph.node:
        for idx in range(len(node.input)):
            node.input[idx] = node.input[idx].replace(".bn_wrapper_replace_me", "")
        for idx in range(len(node.output)):
            node.output[idx] = node.output[idx].replace(".bn_wrapper_replace_me", "")
    validate_onnx(model)
class ONNXToDeepsparse(BaseExporter):
    """
    Optimizes an `onnx.ModelProto` for the deepsparse engine by applying a
    series of transformations to a onnx graph with quantize operations.

    Usage:
    ```python
    # could be a model retrieved previously from TorchToOnnx() or somewhere else
    onnx_model: onnx.ModelProto = ...
    exporter = ONNXToDeepsparse()
    exporter.export(onnx_model, "model.onnx")
    ```

    You can also just optimize the model directly without saving to disk:
    ```python
    onnx_model: onnx.ModelProto = ...
    exporter = ONNXToDeepsparse()
    optimized_model = exporter.apply(onnx_model)
    ```

    :param use_qlinearconv: Set True to use legacy QLinearConv format instead
        of ConvInteger. QLinearConv requires output activations be quantized
        in the quantization recipe. (This was the default behavior prior to
        sparseml 0.12). Default is False
    :param skip_input_quantize: if True, the export flow will attempt to delete
        the first Quantize Linear Nodes(s) immediately after model input and set
        the model input type to UINT8. Default is False
    :param inplace: If true, does conversion of model in place. Default is true
    :param export_input_model: If true, saves the input onnx model alongside the
        optimized model.
    """

    def __init__(
        self,
        use_qlinear_conv: bool = False,
        use_qlinear_matmul: bool = False,
        skip_input_quantize: bool = False,
        inplace: bool = True,
        export_input_model: bool = False,
    ):
        self.inplace = inplace
        self.export_input_model = export_input_model
        # Graph normalization transforms run first; the op-conversion
        # transforms appended below depend on the cleaned-up graph.
        transforms = [
            sparseml_transforms.ConstantsToInitializers(),
            sparseml_transforms.FoldIdentityInitializers(),
            sparseml_transforms.InitializersToUint8(),
            sparseml_transforms.FlattenQParams(),
            sparseml_transforms.FoldConvDivBn(),
            sparseml_transforms.DeleteRepeatedQdq(),
            sparseml_transforms.QuantizeQATEmbedding(),
            sparseml_transforms.PropagateEmbeddingQuantization(),
            sparseml_transforms.PropagateDequantThroughSplit(),
        ]
        if use_qlinear_matmul:
            transforms.append(
                sparseml_transforms.MatMulToQLinearMatMul(),
            )
        transforms.extend(
            [
                sparseml_transforms.MatMulAddToMatMulIntegerAddCastMul(),
                sparseml_transforms.MatMulToMatMulIntegerCastMul(),
                sparseml_transforms.FoldReLUQuants(),
                # legacy QLinearConv format vs the default ConvInteger path
                sparseml_transforms.ConvToQLinearConv()
                if use_qlinear_conv
                else sparseml_transforms.ConvToConvIntegerAddCastMul(),
                sparseml_transforms.GemmToQLinearMatMul(),
                sparseml_transforms.GemmToMatMulIntegerAddCastMul(),
                sparseml_transforms.QuantizeResiduals(),
                sparseml_transforms.RemoveDuplicateQConvWeights(),
                sparseml_transforms.RemoveDuplicateQuantizeOps(),
            ]
        )
        if skip_input_quantize:
            transforms.append(sparseml_transforms.SkipInputQuantize())
        super().__init__(transforms)

    def pre_validate(self, model: Union[onnx.ModelProto, str, Path]) -> onnx.ModelProto:
        # accept either a file path or an in-memory proto; copy the model
        # unless in-place conversion was requested
        if isinstance(model, (str, Path)):
            model = onnx.load(str(model))
        if not isinstance(model, onnx.ModelProto):
            raise TypeError(f"Expected onnx.ModelProto, found {type(model)}")
        return model if self.inplace else deepcopy(model)

    def post_validate(self, model: onnx.ModelProto) -> onnx.ModelProto:
        # sanity check
        if not isinstance(model, onnx.ModelProto):
            raise TypeError(f"Expected onnx.ModelProto, found {type(model)}")
        return model

    def export(
        self,
        pre_transforms_model: Union[ModelProto, str],
        file_path: str,
        do_split_external_data: bool = True,
    ):
        """
        Apply all configured transforms and save the optimized model.

        :param pre_transforms_model: model (or path to one) to optimize
        :param file_path: destination path for the optimized onnx file
        :param do_split_external_data: forwarded to save_onnx
        """
        if not isinstance(pre_transforms_model, ModelProto):
            pre_transforms_model = onnx.load(pre_transforms_model)
        if self.export_input_model or os.getenv("SAVE_PREQAT_ONNX", False):
            # optionally persist the untransformed model for debugging
            save_onnx(
                pre_transforms_model,
                file_path.replace(".onnx", ".preqat.onnx"),
                do_split_external_data=do_split_external_data,
            )
        post_transforms_model: onnx.ModelProto = self.apply(pre_transforms_model)
        save_onnx(
            post_transforms_model,
            file_path,
            do_split_external_data=do_split_external_data,
        )
TORCH_DEFAULT_ONNX_OPSET = _default_opset()
def tensors_to_device(
    tensors: Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]], device: str
) -> Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]]:
    """
    Default function for putting a tensor or collection of tensors to the proper device.
    Returns the tensor references after being placed on the proper device.

    Supported use cases:
        - single tensor
        - Dictionary of single tensors
        - Dictionary of iterable of tensors
        - Dictionary of dictionary of tensors
        - Iterable of single tensors
        - Iterable of iterable of tensors
        - Iterable of dictionary of tensors

    :param tensors: the tensors or collection of tensors to put onto a device
    :param device: the string representing the device to put the tensors on,
        ex: 'cpu', 'cuda', 'cuda:1'
    :return: the tensors or collection of tensors after being placed on the device
    """
    # direct tensor: hand off to torch
    if isinstance(tensors, Tensor):
        return tensors.to(device)
    # OrderedDict is checked before generic Mapping so key order and the
    # concrete container type are both preserved
    if isinstance(tensors, OrderedDict):
        moved_items = [
            (name, tensors_to_device(value, device)) for name, value in tensors.items()
        ]
        return OrderedDict(moved_items)
    if isinstance(tensors, Mapping):
        return {
            name: tensors_to_device(value, device) for name, value in tensors.items()
        }
    # tuple before generic Iterable so tuples stay tuples
    if isinstance(tensors, tuple):
        return tuple(tensors_to_device(value, device) for value in tensors)
    if isinstance(tensors, Iterable):
        return [tensors_to_device(value, device) for value in tensors]

    raise ValueError(
        "unrecognized type for tensors given of {}".format(tensors.__class__.__name__)
    )
def tensors_module_forward(
    tensors: Union[Tensor, Iterable[Tensor], Mapping[Any, Tensor]],
    module: Module,
    check_feat_lab_inp: bool = True,
) -> Any:
    """
    Default function for calling into a model with data for a forward execution.
    Returns the model result.
    Note, if an iterable the features to be passed into the model are considered
    to be at index 0 and other indices are for labels.

    Supported use cases: single tensor,
    iterable with first tensor taken as the features to pass into the model

    :param tensors: the data to be passed into the model, if an iterable the features
        to be passed into the model are considered to be at index 0 and other indices
        are for labels
    :param module: the module to pass the data into
    :param check_feat_lab_inp: True to check if the incoming tensors looks like
        it's made up of features and labels ie a tuple or list with 2 items
        (typical output from a data loader) and will call into the model with just
        the first element assuming it's the features False to not check
    :return: the result of calling into the model for a forward pass
    """
    looks_like_feat_lab_pair = (
        isinstance(tensors, (tuple, list)) and len(tensors) == 2
    )
    if looks_like_feat_lab_pair and check_feat_lab_inp:
        # treat as a (features, labels) pair from a data loader and forward
        # only the features; disable the check on recursion
        return tensors_module_forward(tensors[0], module, check_feat_lab_inp=False)

    if isinstance(tensors, Tensor):
        return module(tensors)
    if isinstance(tensors, Mapping):
        return module(**tensors)
    if isinstance(tensors, Iterable):
        return module(*tensors)

    raise ValueError(
        "unrecognized type for data given of {}".format(tensors.__class__.__name__)
    )
The provided code snippet includes necessary dependencies for implementing the `export_onnx` function. Write a Python function `def export_onnx( module: Module, sample_batch: Any, file_path: str, opset: int = TORCH_DEFAULT_ONNX_OPSET, disable_bn_fusing: bool = True, convert_qat: bool = False, dynamic_axes: Union[str, Dict[str, List[int]]] = None, skip_input_quantize: bool = False, **export_kwargs, )` to solve the following problem:
Export an onnx file for the current module and for a sample batch. Sample batch used to feed through the model to freeze the graph for a particular execution. :param module: torch Module object to export :param sample_batch: the batch to export an onnx for, handles creating the static graph for onnx as well as setting dimensions :param file_path: path to the onnx file to save :param opset: onnx opset to use for exported model. Default is based on torch version. :param disable_bn_fusing: torch >= 1.7.0 only. Set True to disable batch norm fusing during torch export. Default and suggested setting is True. Batch norm fusing will change the exported parameter names as well as affect sensitivity analyses of the exported graph. Additionally, the DeepSparse inference engine, and other engines, perform batch norm fusing at model compilation. :param convert_qat: if True and quantization aware training is detected in the module being exported, the resulting QAT ONNX model will be converted to a fully quantized ONNX model using `ONNXToDeepsparse`. Default is False. :param dynamic_axes: dictionary of input or output names to list of dimensions of those tensors that should be exported as dynamic. May input 'batch' to set the first dimension of all inputs and outputs to dynamic. Default is an empty dict :param skip_input_quantize: if True, the export flow will attempt to delete the first Quantize Linear Nodes(s) immediately after model input and set the model input type to UINT8. Default is False :param export_kwargs: kwargs to be passed as is to the torch.onnx.export api call. Useful to pass in dynamic_axes, input_names, output_names, etc. See more on the torch.onnx.export api spec in the PyTorch docs: https://pytorch.org/docs/stable/onnx.html
Here is the function:
def export_onnx(
    module: Module,
    sample_batch: Any,
    file_path: str,
    opset: int = TORCH_DEFAULT_ONNX_OPSET,
    disable_bn_fusing: bool = True,
    convert_qat: bool = False,
    dynamic_axes: Union[str, Dict[str, List[int]]] = None,
    skip_input_quantize: bool = False,
    **export_kwargs,
):
    """
    Export an onnx file for the current module and for a sample batch.
    Sample batch used to feed through the model to freeze the graph for a
    particular execution.

    :param module: torch Module object to export
    :param sample_batch: the batch to export an onnx for, handles creating the
        static graph for onnx as well as setting dimensions
    :param file_path: path to the onnx file to save
    :param opset: onnx opset to use for exported model.
        Default is based on torch version.
    :param disable_bn_fusing: torch >= 1.7.0 only. Set True to disable batch norm
        fusing during torch export. Default and suggested setting is True. Batch
        norm fusing will change the exported parameter names as well as affect
        sensitivity analyses of the exported graph. Additionally, the DeepSparse
        inference engine, and other engines, perform batch norm fusing at model
        compilation.
    :param convert_qat: if True and quantization aware training is detected in
        the module being exported, the resulting QAT ONNX model will be converted
        to a fully quantized ONNX model using `ONNXToDeepsparse`. Default
        is False.
    :param dynamic_axes: dictionary of input or output names to list of dimensions
        of those tensors that should be exported as dynamic. May input 'batch'
        to set the first dimension of all inputs and outputs to dynamic. Default
        is an empty dict
    :param skip_input_quantize: if True, the export flow will attempt to delete
        the first Quantize Linear Nodes(s) immediately after model input and set
        the model input type to UINT8. Default is False
    :param export_kwargs: kwargs to be passed as is to the torch.onnx.export api
        call. Useful to pass in dynamic_axes, input_names, output_names, etc.
        See more on the torch.onnx.export api spec in the PyTorch docs:
        https://pytorch.org/docs/stable/onnx.html
    """
    # QAT export below opset 13 is known-broken on torch >= 1.10; fail early
    if _PARSED_TORCH_VERSION >= version.parse("1.10.0") and opset < 13 and convert_qat:
        raise ValueError(
            "Exporting onnx with QAT and opset < 13 may result in errors. "
            "Please use opset>=13 with QAT. "
            "See https://github.com/pytorch/pytorch/issues/77455 for more info. "
        )

    if not export_kwargs:
        export_kwargs = {}

    # plain dicts are unordered from the exporter's point of view; input order
    # must match the model's forward signature
    if isinstance(sample_batch, Dict) and not isinstance(
        sample_batch, collections.OrderedDict
    ):
        warnings.warn(
            "Sample inputs passed into the ONNX exporter should be in "
            "the same order defined in the model forward function. "
            "Consider using OrderedDict for this purpose.",
            UserWarning,
        )

    sample_batch = tensors_to_device(sample_batch, "cpu")
    create_parent_dirs(file_path)
    # export from a CPU deep copy so the caller's module and its device
    # placement are left untouched
    module = deepcopy(module).cpu()

    with torch.no_grad():
        # run the batch through once to obtain outputs for output naming
        out = tensors_module_forward(sample_batch, module, check_feat_lab_inp=False)

    if "input_names" not in export_kwargs:
        if isinstance(sample_batch, Tensor):
            export_kwargs["input_names"] = ["input"]
        elif isinstance(sample_batch, Dict):
            export_kwargs["input_names"] = list(sample_batch.keys())
            sample_batch = tuple(
                [sample_batch[f] for f in export_kwargs["input_names"]]
            )
        elif isinstance(sample_batch, Iterable):
            export_kwargs["input_names"] = [
                "input_{}".format(index) for index, _ in enumerate(iter(sample_batch))
            ]
            if isinstance(sample_batch, List):
                sample_batch = tuple(sample_batch)  # torch.onnx.export requires tuple

    if "output_names" not in export_kwargs:
        export_kwargs["output_names"] = _get_output_names(out)

    # Set all batch sizes to be dynamic
    if dynamic_axes is not None:
        for tensor_name in export_kwargs["input_names"] + export_kwargs["output_names"]:
            if tensor_name not in dynamic_axes:
                dynamic_axes[tensor_name] = {0: "batch"}
            else:
                dynamic_axes[tensor_name][0] = "batch"
    else:
        dynamic_axes = {
            tensor_name: {0: "batch"}
            for tensor_name in (
                export_kwargs["input_names"] + export_kwargs["output_names"]
            )
        }

    # disable active quantization observers because they cannot be exported
    disabled_observers = []
    for submodule in module.modules():
        if (
            hasattr(submodule, "observer_enabled")
            and submodule.observer_enabled[0] == 1
        ):
            submodule.observer_enabled[0] = 0
            disabled_observers.append(submodule)

    is_quant_module = any(
        hasattr(submodule, "qconfig") and submodule.qconfig
        for submodule in module.modules()
    )
    batch_norms_wrapped = False
    if (
        _PARSED_TORCH_VERSION >= version.parse("1.7")
        and not is_quant_module
        and disable_bn_fusing
    ):
        # prevent batch norm fusing by adding a trivial operation before every
        # batch norm layer
        batch_norms_wrapped = _wrap_batch_norms(module)

    kwargs = dict(
        model=module,
        args=sample_batch,
        f=file_path,
        verbose=False,
        opset_version=opset,
        dynamic_axes=dynamic_axes,
        **export_kwargs,
    )

    # strip_doc_string was replaced by newer export controls in torch 1.10
    if _PARSED_TORCH_VERSION < version.parse("1.10.0"):
        kwargs["strip_doc_string"] = True
    else:
        kwargs["training"] = torch.onnx.TrainingMode.PRESERVE
        kwargs["do_constant_folding"] = not module.training
        kwargs["keep_initializers_as_inputs"] = False

    torch.onnx.export(**kwargs)

    # re-enable disabled quantization observers
    for submodule in disabled_observers:
        submodule.observer_enabled[0] = 1

    # onnx file fixes
    onnx_model = onnx.load(file_path)
    _fold_identity_initializers(onnx_model)
    _flatten_qparams(onnx_model)
    if batch_norms_wrapped:
        # fix changed batch norm names
        _unwrap_batchnorms(onnx_model)

        # clean up graph from any injected / wrapped operations
        _delete_trivial_onnx_adds(onnx_model)
    save_onnx(onnx_model, file_path)

    if convert_qat and is_quant_module:
        # modules may opt in to QLinearConv/QLinearMatMul via these attributes
        use_qlinear_conv = hasattr(module, "export_with_qlinearconv") and (
            module.export_with_qlinearconv
        )
        use_qlinear_matmul = hasattr(module, "export_with_qlinearmatmul") and (
            module.export_with_qlinearmatmul
        )
        exporter = ONNXToDeepsparse(
            use_qlinear_conv=use_qlinear_conv,
            use_qlinear_matmul=use_qlinear_matmul,
            skip_input_quantize=skip_input_quantize,
        )
        exporter.export(pre_transforms_model=file_path, file_path=file_path)
21,331 | import collections
import json
import logging
import os
import shutil
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy
import onnx
import torch
from onnx import numpy_helper
from packaging import version
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.exporters.onnx_to_deepsparse import ONNXToDeepsparse
from sparseml.onnx.utils import ONNXGraph
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils.helpers import (
adjust_quantization_for_onnx_export,
tensors_export,
tensors_module_forward,
tensors_to_device,
)
from sparseml.pytorch.utils.model import (
is_parallel_model,
save_model,
script_model,
trace_model,
)
from sparseml.utils import clean_path, create_parent_dirs
from sparsezoo.utils import save_onnx, validate_onnx
def _copy_file(src: str, target: str):
    """Copy the file at ``src`` to ``target``, raising if ``src`` is missing."""
    source_present = os.path.exists(src)
    if not source_present:
        raise ValueError(
            f"Attempting to copy file from {src}, but the file does not exist."
        )
    shutil.copyfile(src, target)
21,332 | import collections
import json
import logging
import os
import shutil
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy
import onnx
import torch
from onnx import numpy_helper
from packaging import version
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.exporters.onnx_to_deepsparse import ONNXToDeepsparse
from sparseml.onnx.utils import ONNXGraph
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils.helpers import (
adjust_quantization_for_onnx_export,
tensors_export,
tensors_module_forward,
tensors_to_device,
)
from sparseml.pytorch.utils.model import (
is_parallel_model,
save_model,
script_model,
trace_model,
)
from sparseml.utils import clean_path, create_parent_dirs
from sparsezoo.utils import save_onnx, validate_onnx
CONFIG_JSON_NAME = "config.json"
_LOGGER = logging.getLogger(__name__)
def _create_config_file(save_dir: str) -> str:
    """Create an empty config.json inside ``save_dir`` and return its path."""
    config_file_path = os.path.join(save_dir, CONFIG_JSON_NAME)
    # touch the file (truncating any existing content) so later steps can
    # append configuration entries to it
    open(config_file_path, "w").close()
    _LOGGER.info(f"Created {CONFIG_JSON_NAME} file at {save_dir}")
    return config_file_path
21,333 | import collections
import json
import logging
import os
import shutil
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy
import onnx
import torch
from onnx import numpy_helper
from packaging import version
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.exporters.onnx_to_deepsparse import ONNXToDeepsparse
from sparseml.onnx.utils import ONNXGraph
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils.helpers import (
adjust_quantization_for_onnx_export,
tensors_export,
tensors_module_forward,
tensors_to_device,
)
from sparseml.pytorch.utils.model import (
is_parallel_model,
save_model,
script_model,
trace_model,
)
from sparseml.utils import clean_path, create_parent_dirs
from sparsezoo.utils import save_onnx, validate_onnx
CONFIG_JSON_NAME = "config.json"
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_save_label_to_class_mapping` function. Write a Python function `def _save_label_to_class_mapping( labels_to_class_mapping: Union[str, Dict[int, str]], config_file_path: str, key_name: str = "labels_to_class_mapping", )` to solve the following problem:
Appends `labels_to_class_mapping` information to the config.json file: - new key: `labels_to_class_mapping` - new value: a dictionary that maps the integer labels to string class names If config.json already contains `labels_to_class_mapping`, this information will be overwritten :param labels_to_class_mapping: information about the mapping from integer labels to string class names. Can be either a string (path to the .json serialized dictionary) or a dictionary. :param config_file_path: path to the directory of the `config.json` file. :param key_name: the key under which the information about the mapping will be stored inside the config.json file
Here is the function:
def _save_label_to_class_mapping(
    labels_to_class_mapping: Union[str, Dict[int, str]],
    config_file_path: str,
    key_name: str = "labels_to_class_mapping",
):
    """
    Appends `labels_to_class_mapping` information to the config.json file:
    - new key: `labels_to_class_mapping`
    - new value: a dictionary that maps the integer
      labels to string class names
    If config.json already contains `labels_to_class_mapping`,
    this information will be overwritten

    :param labels_to_class_mapping: information about the mapping from
        integer labels to string class names. Can be either a string
        (path to the .json serialized dictionary) or a dictionary.
    :param config_file_path: path to the directory of the `config.json` file.
    :param key_name: the key under which the information about
        the mapping will be stored inside the config.json file
    """
    is_config_empty = os.stat(config_file_path).st_size == 0
    if not is_config_empty:
        with open(config_file_path, "r") as config_file:
            # BUG FIX: json.load expects a file object; the previous
            # json.load(outfile.read()) passed a str and always raised,
            # so existing config contents could never be preserved
            config = json.load(config_file)
    else:
        config = {}

    # check whether the label names are not already present in the config.
    if key_name in config.keys():
        _LOGGER.warning(
            f"File: {CONFIG_JSON_NAME} already contains key {key_name}. "
            f"{key_name} data will be overwritten"
        )

    if isinstance(labels_to_class_mapping, str):
        # a path was given: deserialize the mapping from the .json file
        with open(labels_to_class_mapping) as mapping_file:
            labels_to_class_mapping = json.load(mapping_file)

    config[key_name] = labels_to_class_mapping

    with open(config_file_path, "w") as config_file:
        json.dump(config, config_file)

    _LOGGER.info(
        f"Appended {key_name} data to {CONFIG_JSON_NAME} at {config_file_path}"
    )
21,334 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_one_hot_label_mapping(labels: Tensor, class_names: Dict[Any, str]):
    """Return [one-hot label matrix, repeated class-name matrix] for ``labels``."""
    num_classes = len(class_names)

    def _encode(index: int):
        # one-hot row: single 1 at the label's position
        row = [0] * num_classes
        row[index] = 1
        return row

    names_row = [name for _, name in class_names.items()]
    return [
        numpy.array([_encode(label) for label in labels]),
        numpy.array([names_row] * len(labels)),
    ]


def cifar10_label_mapping(labels: Tensor):
    """One-hot encode CIFAR-10 labels alongside the CIFAR-10 class names."""
    return apply_one_hot_label_mapping(labels, CIFAR_10_CLASSES)
21,335 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_one_hot_label_mapping(labels: Tensor, class_names: Dict[Any, str]):
    """Build one-hot encodings plus a per-sample row of class names."""
    total = len(class_names.keys())
    names = [value for _, value in class_names.items()]

    one_hot_rows = []
    for label in labels:
        row = [0] * total
        row[label] = 1
        one_hot_rows.append(row)

    return [numpy.array(one_hot_rows), numpy.array([names] * len(labels))]


def imagenette_label_mapping(labels: Tensor):
    """One-hot encode Imagenette labels alongside the Imagenette class names."""
    return apply_one_hot_label_mapping(
        labels,
        IMAGENETTE_CLASSES,
    )
21,336 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_one_hot_label_mapping(labels: Tensor, class_names: Dict[Any, str]):
    """Encode ``labels`` one-hot and pair each sample with the class-name row."""
    width = len(class_names.keys())

    def _one_hot(idx: int):
        encoded = [0] * width
        encoded[idx] = 1
        return encoded

    name_row = [text for _, text in class_names.items()]
    encodings = numpy.array([_one_hot(lbl) for lbl in labels])
    names = numpy.array([name_row] * len(labels))
    return [encodings, names]


def imagenet_label_mapping(labels: Tensor):
    """One-hot encode ImageNet labels alongside the ImageNet class names."""
    return apply_one_hot_label_mapping(
        labels,
        IMAGENET_CLASSES,
    )
21,337 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_one_hot_label_mapping(labels: Tensor, class_names: Dict[Any, str]):
    """Return one-hot label rows and repeated class-name rows for ``labels``."""
    n_classes = len(class_names.keys())
    display_names = [v for _, v in class_names.items()]

    rows = []
    for lbl in labels:
        vec = [0] * n_classes
        vec[lbl] = 1
        rows.append(vec)

    return [
        numpy.array(rows),
        numpy.array([display_names] * len(labels)),
    ]


def mnist_label_mapping(labels: Tensor):
    """One-hot encode MNIST digit labels with digit-string class names."""
    return apply_one_hot_label_mapping(labels, {idx: str(idx) for idx in range(10)})
21,338 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def coco_yolo_2017_mapping(labels: Iterable[Tensor]):
    """Pair YOLO-style COCO labels with a per-sample row of the 80 class names."""
    names = list(COCO_CLASSES_80.values())
    batch_size = labels[0].shape[0]
    return [
        labels[0],
        [numpy.array([names] * batch_size)],
    ]
21,339 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_box_label_mapping(labels: Iterable[Tensor], class_names: Dict[Any, str]):
def coco_mapping(labels: Iterable[Tensor]):
    # delegate to apply_box_label_mapping with the full COCO class-name dict
    return apply_box_label_mapping(labels, COCO_CLASSES)
21,340 | from typing import Any, Dict, Iterable
import numpy
from torch import Tensor
from sparseml.utils.datasets import (
CIFAR_10_CLASSES,
COCO_CLASSES,
COCO_CLASSES_80,
IMAGENET_CLASSES,
IMAGENETTE_CLASSES,
VOC_CLASSES,
)
def apply_box_label_mapping(labels: Iterable[Tensor], class_names: Dict[Any, str]):
    """Attach a dense per-sample class-name list to detection boxes/labels.

    Missing integer keys up to the maximum key are filled with "".
    """
    highest = max(class_names.keys())
    dense_names = [class_names.get(i, "") for i in range(highest + 1)]
    batch_size = labels[0].shape[0]
    return [
        labels[0],
        labels[1],
        [numpy.array([dense_names] * batch_size)],
    ]


def voc_mapping(labels: Iterable[Tensor]):
    """Attach the Pascal VOC class names to detection labels."""
    return apply_box_label_mapping(labels, VOC_CLASSES)
21,341 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
The provided code snippet includes necessary dependencies for implementing the `load_optimizer` function. Write a Python function `def load_optimizer( path: str, optimizer: Optimizer, map_location: Union[None, str] = "cpu" )` to solve the following problem:
Load the state dict into an optimizer from a given file. :param path: the path to the pth file to load the state dict from :param optimizer: the optimizer to load the state dict into :param map_location: the location to map the values to when loading the :return: the epoch saved in the file, if any
Here is the function:
def load_optimizer(
    path: str, optimizer: Optimizer, map_location: Union[None, str] = "cpu"
):
    """
    Load the state dict into an optimizer from a given file.

    :param path: the path to the pth file to load the state dict from
    :param optimizer: the optimizer to load the state dict into
    :param map_location: the location to map the values to when loading the
    :return: the epoch saved in the file, if any
    """
    checkpoint = torch.load(path, map_location=map_location)
    optimizer.load_state_dict(checkpoint["optimizer"])
21,342 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
def load_epoch(path: str, map_location: Union[None, str] = "cpu") -> Union[int, None]:
    """Return the 'epoch' entry stored in a checkpoint file, or None if absent."""
    checkpoint = torch.load(path, map_location=map_location)
    # dict.get yields None when no epoch was saved, matching callers' contract
    return checkpoint.get("epoch")
21,343 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
The provided code snippet includes necessary dependencies for implementing the `trace_model` function. Write a Python function `def trace_model( path: str, model: Module, sample_batch: Any, )` to solve the following problem:
Convenience function which traces the provided module using the sample batch into a TorchScript script and saves to provided path. :param path: path to save torchscript :param model: module to convert to TorchScript :param sample_batch: sample batch to trace module with
Here is the function:
def trace_model(
    path: str,
    model: Module,
    sample_batch: Any,
):
    """
    Convenience function which traces the provided module using the sample batch
    into a TorchScript script and saves to the provided path.

    :param path: path to save torchscript
    :param model: module to convert to TorchScript
    :param sample_batch: sample batch to trace module with
    """
    traced = torch.jit.trace_module(model, {"forward": sample_batch})
    torch.jit.save(traced, path)
21,344 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
The provided code snippet includes necessary dependencies for implementing the `script_model` function. Write a Python function `def script_model( path: str, model: Module, )` to solve the following problem:
Convenience function which scripts the provided module into a TorchScript script and saves to provided path. :param path: path to save torchscript :param model: module to convert to torchscript
Here is the function:
def script_model(
    path: str,
    model: Module,
):
    """
    Convenience function which scripts the provided module into a TorchScript
    script and saves to the provided path.

    :param path: path to save torchscript
    :param model: module to convert to torchscript
    """
    torch.jit.save(torch.jit.script(model), path)
21,345 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
def is_parallel_model(model: Module) -> bool:
    """
    :param model: the model to test
    :return: True if the given model is wrapped as a DataPararallel or
        DistributedDataParallel Module. False otherwise
    """
    if isinstance(model, DataParallel):
        return True
    # DDP may be None when the distributed import failed at module load
    return DDP is not None and isinstance(model, DDP)
def create_parent_dirs(path: str):
    """
    :param path: the file path to try to create the parent directories for
    """
    # delegate to create_dirs with the directory containing ``path``
    parent = os.path.dirname(path)
    create_dirs(parent)
The provided code snippet includes necessary dependencies for implementing the `save_model` function. Write a Python function `def save_model( path: str, model: Module, optimizer: Optimizer = None, recipe: Optional[str] = None, epoch: Optional[int] = None, use_zipfile_serialization_if_available: bool = True, include_modifiers: bool = False, arch_key: Optional[str] = None, )` to solve the following problem:
Save a model's state dict into a file at the given path. Additionally can save an optimizer's state and the current epoch. :param path: the path to save the file the states to :param model: the model to save state for :param optimizer: the optimizer, if any, to save state for :param recipe: the recipe used to obtain the model :param epoch: the epoch to save :param use_zipfile_serialization_if_available: for torch >= 1.6.0 only exports the model's state dict using the new zipfile serialization :param include_modifiers: if True, and a ScheduledOptimizer is provided as the optimizer, the associated ScheduledModifierManager and its Modifiers will be exported under the 'manager' key. Default is False :param arch_key: if provided, the `arch_key` will be saved in the checkpoint
Here is the function:
def save_model(
    path: str,
    model: Module,
    optimizer: Optimizer = None,
    recipe: Optional[str] = None,
    epoch: Optional[int] = None,
    use_zipfile_serialization_if_available: bool = True,
    include_modifiers: bool = False,
    arch_key: Optional[str] = None,
):
    """
    Save a model's state dict into a file at the given path.
    Additionally can save an optimizer's state and the current epoch.

    :param path: the path to save the file the states to
    :param model: the model to save state for
    :param optimizer: the optimizer, if any, to save state for
    :param recipe: the recipe used to obtain the model
    :param epoch: the epoch to save
    :param use_zipfile_serialization_if_available: for torch >= 1.6.0 only
        exports the model's state dict using the new zipfile serialization
    :param include_modifiers: if True, and a ScheduledOptimizer is provided
        as the optimizer, the associated ScheduledModifierManager and its
        Modifiers will be exported under the 'manager' key. Default is False
    :param arch_key: if provided, the `arch_key` will be saved in the
        checkpoint
    """
    create_parent_dirs(path)

    if is_parallel_model(model):
        # unwrap DataParallel/DDP so state_dict keys have no "module." prefix
        model = model.module

    save_dict = {"state_dict": OrderedDict()}

    # make sure we have the model state_dict on cpu
    for key, state in model.state_dict().items():
        copy = torch.zeros(state.shape)
        copy.copy_(state)
        save_dict["state_dict"][key] = copy

    if optimizer:
        save_dict["optimizer"] = optimizer.state_dict()

    if recipe:
        save_dict["recipe"] = recipe

    if epoch is not None:
        save_dict["epoch"] = epoch

    # two optimizer-wrapper styles expose the manager differently; prefer
    # manager_state_dict when present
    if include_modifiers and optimizer and hasattr(optimizer, "manager_state_dict"):
        save_dict["manager"] = optimizer.manager_state_dict()
    elif include_modifiers and optimizer and hasattr(optimizer, "wrapped_manager"):
        save_dict["manager"] = optimizer.wrapped_manager.state_dict()

    if arch_key:
        save_dict["arch_key"] = arch_key

    # zipfile serialization only exists on torch >= 1.6
    if version.parse(torch.__version__) < version.parse("1.6"):
        torch.save(save_dict, path)
    else:
        torch.save(
            save_dict,
            path,
            _use_new_zipfile_serialization=use_zipfile_serialization_if_available,
        )
21,346 | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import DataParallel, Module
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.utils.helpers import (
download_framework_model_by_recipe_type,
thin_model_from_checkpoint,
)
from sparseml.utils.helpers import create_parent_dirs
from sparsezoo import Model
try:
from torch.nn.parallel import DistributedDataParallel as DDP
ddp_import_error = None
except Exception as ddp_error:
DDP = None
ddp_import_error = ddp_error
def parallelize_model(model: Module, ids: Union[None, List[int]]) -> Module:
    """
    Data parallelize a model across multiple devices

    :param model: the model to parallelize across multiple devices
    :param ids: the ids of the devices to parallelize across
    :return: a parallelized model
    """
    # _DataParallel appears to be a DataParallel wrapper defined elsewhere in
    # this module — not visible in this chunk; TODO confirm
    return _DataParallel(model, ids)
def device_to_name_ids(device: str) -> Tuple[str, Union[None, List[int]]]:
    """
    Split a device string into a device and ids

    :param device: the device string to push to; ex: cpu, cuda, cuda:0,1
    :return: a tuple containing the device string and devices
    """
    parts = device.split(":")
    name = parts[0]

    if name == "cpu":
        return name, None

    if name != "cuda" or not torch.cuda.is_available():
        raise ValueError("{} device not available on this system".format(name))

    if len(parts) < 2:
        return name, None

    device_ids = [int(token) for token in parts[1].split(",")]
    available = torch.cuda.device_count()
    for device_id in device_ids:
        if device_id >= available:
            raise ValueError(
                "{} device id not available on this system".format(device_id)
            )

    # a single id collapses to a fully-qualified device string with no id list
    if len(device_ids) == 1:
        return "{}:{}".format(name, device_ids[0]), None

    return name, device_ids
The provided code snippet includes necessary dependencies for implementing the `model_to_device` function. Write a Python function `def model_to_device( model: Module, device: Union[str, int], ddp: bool = False, ) -> Tuple[Module, str, Union[None, List[int]]]` to solve the following problem:
The model to push onto a device or multiple devices. :param model: the model to push to a device :param device: the device string to push to; ex: cpu, cuda, cuda:0,1. For DDP, device should be the local_rank int value; ex: 0 :param ddp: set True to wrap module as a DDP object. If True, device should be set to the local_rank int value. Default is False :return: a tuple containing the model on desired device(s), the device name, and the ids for the device
Here is the function:
def model_to_device(
    model: Module,
    device: Union[str, int],
    ddp: bool = False,
) -> Tuple[Module, str, Union[None, List[int]]]:
    """
    Push a model onto a device or multiple devices.

    :param model: the model to push to a device
    :param device: the device string to push to; ex: cpu, cuda, cuda:0,1. For
        DDP, device should be the local_rank int value; ex: 0
    :param ddp: set True to wrap module as a DDP object. If True, device should
        be set to the local_rank int value. Default is False
    :return: a tuple containing the model on desired device(s),
        the device name, and the ids for the device
    """
    if ddp:
        # DDP requires the elastic import to have succeeded and an int local_rank
        if DDP is None:
            raise ddp_import_error
        assert isinstance(
            device, int
        ), "For DDP, device must be set to a local_rank int value"
        assert device < torch.cuda.device_count(), (
            "Device local rank must be less than the number of available cuda devices. "
            "Received local rank {} with device count=={}"
        ).format(device, torch.cuda.device_count())
        model = model.to(device)
        model = DDP(model, device_ids=[device], output_device=device)
        ids = [device]
        device = "cuda:{}".format(device)
    else:
        device, ids = device_to_name_ids(device)
        if ids is not None:
            # multiple ids -> wrap in a data-parallel container
            model = parallelize_model(model, ids)
        model = model.to(device)

    return model, device, ids
21,347 |
The provided code snippet includes necessary dependencies for implementing the `record` function. Write a Python function `def record(func)` to solve the following problem:
Wraps function with pytorch elastic record for error propagation. Introduced in pytorch v1.9 and will return the unmodified function for lesser versions
Here is the function:
def record(func):
    """
    Wraps function with pytorch elastic record for error propagation. Introduced in
    pytorch v1.9 and will return the unmodified function for lesser versions
    """
    try:
        from torch.distributed.elastic.multiprocessing.errors import (
            record as elastic_record,
        )
    except ImportError:
        # torch < 1.9: no elastic error propagation available
        return func
    return elastic_record(func)
21,348 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
def default_device() -> str:
    """
    :return: the device that should be defaulted to for the current setup.
        if multiple gpus are available then will return a string with all of them,
        else if single gpu available then will return cuda,
        else returns cpu
    """
    if not torch.cuda.is_available():
        return "cpu"

    num_gpus = torch.cuda.device_count()
    if num_gpus < 2:
        return "cuda"

    # enumerate every visible gpu into a single device string
    all_ids = ",".join(str(idx) for idx in range(num_gpus))
    return "cuda:{}".format(all_ids)
def device_of(inputs: Any):
    """
    Resolve the device of the first tensor found in ``inputs``.

    :param inputs: a Tensor, a Mapping of tensors, or an Iterable of tensors
    :return: the torch device of the first contained tensor; falls back to
        :func:`default_device` when a Mapping is empty
    :raises RuntimeError: if inputs is none of the supported container types
    """
    if isinstance(inputs, Tensor):
        return inputs.device

    if isinstance(inputs, Mapping):
        # return the device of the first value found
        for value in inputs.values():
            return device_of(value)
    elif isinstance(inputs, Iterable):
        return device_of(inputs[0])
    else:
        raise RuntimeError("Unknown type of inputs to device_of function")

    # only reachable for an empty Mapping
    return default_device()
21,349 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_optim_learning_rate` function. Write a Python function `def get_optim_learning_rate(optim: Optimizer) -> float` to solve the following problem:
:param optim: The optimizer to get the learning rate for :return: convenience function to get the first learning rate for any of the param groups in the optimizer
Here is the function:
def get_optim_learning_rate(optim: Optimizer) -> float:
    """
    :param optim: The optimizer to get the learning rate for
    :return: convenience function to get the first learning rate for any of
        the param groups in the optimizer
    :raises RuntimeError: if the optimizer has no param groups
    """
    groups = iter(optim.param_groups)
    try:
        first_group = next(groups)
    except StopIteration:
        raise RuntimeError("cannot get learning_rate, no param_groups available")
    return first_group["lr"]
21,350 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_optim_groups_learning_rates` function. Write a Python function `def get_optim_groups_learning_rates(optim: Optimizer) -> List[float]` to solve the following problem:
:param optim: The optimizer to get the learning rates for :return: get a list of tuples corresponding to the learning rates for the param groups in the optimizer
Here is the function:
def get_optim_groups_learning_rates(optim: Optimizer) -> List[float]:
    """
    :param optim: The optimizer to get the learning rates for
    :return: get a list of tuples corresponding to the learning rates for the
        param groups in the optimizer
    """
    rates = []
    for param_group in optim.param_groups:
        rates.append(param_group["lr"])
    return rates
21,351 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `set_optim_learning_rate` function. Write a Python function `def set_optim_learning_rate( optim: Optimizer, value: float, groups: Optional[List[int]] = None )` to solve the following problem:
:param optim: The optimizer to set the learning rate for :param value: the learning rate to set for the optimizer, will set all param groups in the optim to this value
Here is the function:
def set_optim_learning_rate(
    optim: Optimizer, value: float, groups: Optional[List[int]] = None
):
    """
    :param optim: The optimizer to set the learning rate for
    :param value: the learning rate to set for the optimizer,
        will set all param groups in the optim to this value
    :param groups: optional indices of the param groups to update;
        all groups are updated when not given or empty
    """
    for index, param_group in enumerate(optim.param_groups):
        # an empty/None groups selection means "update every group"
        selected = not groups or index in groups
        if selected:
            param_group["lr"] = value
21,352 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
def early_stop_data_loader(data_loader: DataLoader, early_stop_steps: int):
    """
    An iterator that goes through the data_loader for yields and stops
    after early_stop_steps instead of the full loader

    :param data_loader: the data loader to continually repeat
    :param early_stop_steps: if set, the number of steps to run and break out early
        instead of running all of the steps in the data loader,
        if < 1 then will run the full length
    :return: an iterable for the never ending data loader
    """
    for step, batch in enumerate(data_loader, start=1):
        yield batch
        # a non-positive early_stop_steps disables early stopping entirely
        if 0 < early_stop_steps <= step:
            break
The provided code snippet includes necessary dependencies for implementing the `infinite_data_loader` function. Write a Python function `def infinite_data_loader( data_loader: DataLoader, early_stop_steps: int = -1, cache: bool = False )` to solve the following problem:
A never ending data loader that will keep repeating the one passed in. Will additionally cache the data if requested. :param data_loader: the data loader to continually repeat :param early_stop_steps: if set, the number of steps to run and break out early instead of running all of the steps in the data loader :param cache: True to cache the results in memory and return those on subsequent requests, False otherwise :return: an iterable for the never ending data loader
Here is the function:
def infinite_data_loader(
    data_loader: DataLoader, early_stop_steps: int = -1, cache: bool = False
):
    """
    A never ending data loader that will keep repeating the one passed in.
    Will additionally cache the data if requested.

    :param data_loader: the data loader to continually repeat
    :param early_stop_steps: if set, the number of steps to run and break out early
        instead of running all of the steps in the data loader
    :param cache: True to cache the results in memory and return those on
        subsequent requests, False otherwise
    :return: an iterable for the never ending data loader
    """
    cached_batches = None

    while True:
        if cache and cached_batches is not None:
            # replay the in-memory copy instead of touching the loader again
            for batch in cached_batches:
                yield batch
        else:
            cached_batches = []
            for batch in early_stop_data_loader(data_loader, early_stop_steps):
                if cache:
                    # deepcopy so later mutation by the consumer can't corrupt the cache
                    cached_batches.append(deepcopy(batch))
                yield batch
21,353 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `tensors_batch_size` function. Write a Python function `def tensors_batch_size(tensors: Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]])` to solve the following problem:
Default function for getting the batch size from a tensor or collection of tensors. Returns the batch size (zeroth index for shape) of the first found tensor. Supported use cases: - single tensor - Dictionary of single tensors - Dictionary of iterable of tensors - Dictionary of dictionary of tensors - Iterable of single tensors - Iterable of iterable of tensors - Iterable of dictionary of tensors :param tensors: the tensor or collection of tensors to get a batch size from, taken from the first found tensor :return: the batch size (0th element of shape) of the first contained tensor in the data
Here is the function:
def tensors_batch_size(tensors: Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]]):
    """
    Default function for getting the batch size from a tensor or collection of tensors.
    Returns the batch size (zeroth index for shape) of the first found tensor.

    Supported use cases:
    - single tensor
    - Dictionary of single tensors
    - Dictionary of iterable of tensors
    - Dictionary of dictionary of tensors
    - Iterable of single tensors
    - Iterable of iterable of tensors
    - Iterable of dictionary of tensors

    :param tensors: the tensor or collection of tensors to get a batch size from,
        taken from the first found tensor
    :return: the batch size (0th element of shape) of the first contained
        tensor in the data, or -1 if no tensor is found
    """
    if isinstance(tensors, Tensor):
        return tensors.shape[0]

    if isinstance(tensors, Dict):
        # search the values first; dict iteration below would only yield keys
        for value in tensors.values():
            nested_size = tensors_batch_size(value)
            if nested_size > -1:
                return nested_size

    if isinstance(tensors, Iterable):
        for item in tensors:
            nested_size = tensors_batch_size(item)
            if nested_size > -1:
                return nested_size

    return -1
21,354 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `tensors_to_precision` function. Write a Python function `def tensors_to_precision( tensors: Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]], full_precision: bool ) -> Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]]` to solve the following problem:
:param tensors: the tensors to change the precision of :param full_precision: True for full precision (float 32) and False for half (float 16) :return: the tensors converted to the desired precision
Here is the function:
def tensors_to_precision(
    tensors: Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]], full_precision: bool
) -> Union[Tensor, Iterable[Tensor], Dict[Any, Tensor]]:
    """
    :param tensors: the tensors to change the precision of
    :param full_precision: True for full precision (float 32) and
        False for half (float 16)
    :return: the tensors converted to the desired precision
    :raises ValueError: if tensors is not a Tensor or supported collection
    """
    if isinstance(tensors, Tensor):
        converted = tensors.float() if full_precision else tensors.half()
        return converted

    if isinstance(tensors, Mapping):
        # preserve keys, recurse on values
        result = {}
        for key, value in tensors.items():
            result[key] = tensors_to_precision(value, full_precision)
        return result

    if isinstance(tensors, tuple):
        # tuples keep their type; other iterables collapse to a list
        return tuple(tensors_to_precision(value, full_precision) for value in tensors)

    if isinstance(tensors, Iterable):
        return [tensors_to_precision(value, full_precision) for value in tensors]

    raise ValueError(
        "unrecognized type for tensors given of {}".format(tensors.__class__.__name__)
    )
21,355 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
def tensor_sparsity(
    tens: Tensor, dim: Union[None, int, List[int], Tuple[int, ...]] = None
) -> Tensor:
    """
    :param tens: the tensor to calculate the sparsity for
    :param dim: the dimension(s) to split the calculations over;
        ex, can split over batch, channels, or combos
    :return: the sparsity of the input tens, ie the fraction of numbers that are zero
    :raises ValueError: if any requested dim is out of range for tens
    """
    if dim is None:
        # global sparsity: fraction of zero elements over the whole tensor
        zeros = (tens == 0).sum()
        total = tens.numel()
        return zeros.float() / float(total)
    if isinstance(dim, int):
        # normalize a single dim into a list for uniform handling below
        dim = [dim]
    if max(dim) >= len(tens.shape):
        raise ValueError(
            "Unsupported dim given of {} in {} for tensor shape {}".format(
                max(dim), dim, tens.shape
            )
        )
    # count zeros across every dimension that is NOT kept in the output
    sum_dims = [ind for ind in range(len(tens.shape)) if ind not in dim]
    zeros = (tens == 0).sum(dim=sum_dims) if sum_dims else tens == 0
    # number of elements contributing to each per-position zero count
    total = numpy.prod(
        [tens.shape[ind] for ind in range(len(tens.shape)) if ind not in dim]
    )
    # build a permutation so the kept dims appear in the order requested by dim
    permute_order = sorted(
        ((d, len(dim) - i - 1) for i, d in enumerate(dim)), reverse=True
    )
    permute = [d[1] for d in permute_order]
    if permute != [i for i in range(len(permute))]:
        # need to permute to get desired dimensions at the front
        zeros = zeros.permute(*permute).contiguous()
    return zeros.float() / float(total)
The provided code snippet includes necessary dependencies for implementing the `tensor_density` function. Write a Python function `def tensor_density(tens: Tensor, dim: Union[None, int, Iterable[int]] = None) -> Tensor` to solve the following problem:
:param tens: the tensor to calculate the density for :param dim: the dimension(s) to split the calculations over; ex, can split over batch, channels, or combos :return: the density of the input tens, ie the fraction of numbers that are non zero
Here is the function:
def tensor_density(tens: Tensor, dim: Union[None, int, Iterable[int]] = None) -> Tensor:
    """
    :param tens: the tensor to calculate the density for
    :param dim: the dimension(s) to split the calculations over; ex, can split over
        batch, channels, or combos
    :return: the density of the input tens, ie the fraction of numbers that are non zero
    """
    # density is simply the complement of sparsity
    return 1.0 - tensor_sparsity(tens, dim)
21,356 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
try:
from torch.nn.qat import Conv3d as QATConv3d
except Exception as _err:
quant_conv3d_err = _err
QATConv3d = None
The provided code snippet includes necessary dependencies for implementing the `tensor_sample` function. Write a Python function `def tensor_sample( tens: Tensor, sample_size: int, dim: Union[None, int, List[int], Tuple[int, ...]] = None, ) -> Tensor` to solve the following problem:
:param tens: the tensor to grab samples from :param sample_size: the number of samples to grab overall if dim is not supplied or per each dim if it is :param dim: the dimension(s) to split the samples over; ex, can split over batch, channels, or combos :return: the sampled tensor
Here is the function:
def tensor_sample(
    tens: Tensor,
    sample_size: int,
    dim: Union[None, int, List[int], Tuple[int, ...]] = None,
) -> Tensor:
    """
    :param tens: the tensor to grab samples from
    :param sample_size: the number of samples to grab overall if dim is not supplied
        or per each dim if it is
    :param dim: the dimension(s) to split the samples over;
        ex, can split over batch, channels, or combos
    :return: the sampled tensor
    :raises ValueError: if sample_size < 1 or a dim is out of range for tens
    """
    if sample_size < 1:
        raise ValueError("improper sample size given of {}".format(sample_size))

    if dim is None:
        # flat sampling (with replacement) across the entire tensor
        flat_indices = tens.new_zeros((sample_size,)).long().random_(0, tens.numel())
        return tens.view(-1)[flat_indices]

    if isinstance(dim, int):
        dim = [dim]

    if max(dim) >= len(tens.shape):
        raise ValueError(
            "Unsupported dim given of {} in {} for tensor shape {}".format(
                max(dim), dim, tens.shape
            )
        )

    if dim != [ind for ind in range(len(dim))]:
        # put the desired dimension(s) at the front to sample from
        trailing_dims = [ind for ind in range(len(tens.shape)) if ind not in dim]
        tens = tens.permute(*dim, *trailing_dims)
        dim = [ind for ind in range(len(dim))]

    if not tens.is_contiguous():
        tens = tens.contiguous()

    # number of leading slices to sample from, and elements per slice
    lead_count = int(numpy.prod([tens.shape[ind] for ind in range(len(dim))]))
    elems_per_slice = int(
        numpy.prod([tens.shape[ind] for ind in range(len(dim), len(tens.shape))])
    )

    # offsets to the start of each slice within the flattened tensor
    indices = tens.new_tensor(
        [ind * elems_per_slice for ind in range(lead_count)], dtype=torch.long
    ).unsqueeze(1)
    # broadcast each offset across the requested number of samples
    indices = indices * tens.new_ones((lead_count, sample_size), dtype=torch.long)
    # add a random position within each slice for every sample
    indices += tens.new_zeros((lead_count, sample_size), dtype=torch.long).random_(
        0, elems_per_slice
    )

    samples = tens.view(-1)[indices.view(-1)]
    # reshape so samples are grouped per kept dimension
    return samples.view(*(tens.shape[ind] for ind in dim), sample_size)
21,357 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `tensor_list_sparsity` function. Write a Python function `def tensor_list_sparsity(tensors: List[Tensor]) -> float` to solve the following problem:
:param tensors: the list of tensors to calculate the sparsity for :return: the total sparsity of all tensors in the list
Here is the function:
def tensor_list_sparsity(tensors: List[Tensor]) -> float:
    """
    :param tensors: the list of tensors to calculate the sparsity for
    :return: the total sparsity of all tensors in the list
    """
    total_zeros = sum((tensor == 0).sum().item() for tensor in tensors)
    total_elements = sum(tensor.numel() for tensor in tensors)
    return float(total_zeros) / float(total_elements)
21,358 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `mask_difference` function. Write a Python function `def mask_difference(old_mask: Tensor, new_mask: Tensor) -> Tensor` to solve the following problem:
:param old_mask: the old mask to compare against for calculating the difference :param new_mask: the new mask to compare with for calculating the difference :return: a tensor representing the change from the old_mask to the new_mask specifically values returned as 1.0 are newly unmasked (0.0 => 1.0) values returned as -1.0 are newly masked (1.0 => 0.0) values returned as 0.0 had no change in (0.0 => 0.0 or 1.0 => 1.0)
Here is the function:
def mask_difference(old_mask: Tensor, new_mask: Tensor) -> Tensor:
    """
    :param old_mask: the old mask to compare against for calculating the difference
    :param new_mask: the new mask to compare with for calculating the difference
    :return: a tensor representing the change from the old_mask to the new_mask
        specifically values returned as 1.0 are newly unmasked (0.0 => 1.0)
        values returned as -1.0 are newly masked (1.0 => 0.0)
        values returned as 0.0 had no change in (0.0 => 0.0 or 1.0 => 1.0)
    """
    changed = old_mask != new_mask
    newly_masked = (changed & (new_mask == 0.0)).type(old_mask.type())
    newly_unmasked = (changed & (new_mask == 1.0)).type(old_mask.type())
    # unmasked positions contribute +1, masked positions contribute -1
    return newly_unmasked - newly_masked
21,359 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `replace_layer` function. Write a Python function `def replace_layer( module: Module, name: str, replace: Module, ) -> Module` to solve the following problem:
General function to replace a layer in a module with the given new one. :param module: the module to replace the layer in :param name: the name of the layer to replace the activation for :param replace: the module to replace the layer with :return: the original layer that was replaced
Here is the function:
def replace_layer(
    module: Module,
    name: str,
    replace: Module,
) -> Module:
    """
    General function to replace a layer in a module with the given new one.

    :param module: the module to replace the layer in
    :param name: the name of the layer to replace the activation for
    :param replace: the module to replace the layer with
    :return: the original layer that was replaced
    """
    # walk the dotted path down to the direct parent of the target layer
    *parent_path, layer_name = name.split(".")
    parent = module
    for attribute in parent_path:
        parent = parent.__getattr__(attribute)

    original = parent.__getattr__(layer_name)
    parent.__setattr__(layer_name, replace)
    return original
21,360 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_terminal_layers` function. Write a Python function `def get_terminal_layers(module: Module) -> Dict[str, Module]` to solve the following problem:
:param module: the module to grab all terminal layers for :return: a list of all of the terminal layers in a model (ie not containers; so convs, linears, activations, etc)
Here is the function:
def get_terminal_layers(module: Module) -> Dict[str, Module]:
    """
    :param module: the module to grab all terminal layers for
    :return: a list of all of the terminal layers in a model
        (ie not containers; so convs, linears, activations, etc)
    """
    terminal = {}
    for mod_name, mod in module.named_modules():
        # a leaf module yields exactly one entry (itself) from named_modules()
        descendant_count = sum(1 for _ in mod.named_modules())
        if descendant_count == 1:
            terminal[mod_name] = mod
    return terminal
21,361 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_conv_layers` function. Write a Python function `def get_conv_layers(module: Module) -> Dict[str, Module]` to solve the following problem:
:param module: the module to grab all conv layers for :return: a dict of all the conv layers in the module
Here is the function:
def get_conv_layers(module: Module) -> Dict[str, Module]:
    """
    :param module: the module to grab all conv layers for
    :return: a dict of all the conv layers in the module
    """
    convs = {}
    for name, mod in module.named_modules():
        # GPTConv1D may be None when transformers is unavailable,
        # so it is guarded before the isinstance check
        if isinstance(mod, _ConvNd) or (GPTConv1D and isinstance(mod, GPTConv1D)):
            convs[name] = mod
    return convs
21,362 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_linear_layers` function. Write a Python function `def get_linear_layers(module: Module) -> Dict[str, Module]` to solve the following problem:
:param module: the module to grab all linear layers for :return: a dict of all linear layers in the module
Here is the function:
def get_linear_layers(module: Module) -> Dict[str, Module]:
    """
    :param module: the module to grab all linear layers for
    :return: a dict of all linear layers in the module
    """
    linears = {}
    for name, mod in module.named_modules():
        if isinstance(mod, Linear):
            linears[name] = mod
    return linears
21,363 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_prunable_layers` function. Write a Python function `def get_prunable_layers(module: Module) -> List[Tuple[str, Module]]` to solve the following problem:
:param module: the module to get the prunable layers from :return: a list containing the names and modules of the prunable layers (Linear, ConvNd)
Here is the function:
def get_prunable_layers(module: Module) -> List[Tuple[str, Module]]:
    """
    :param module: the module to get the prunable layers from
    :return: a list containing the names and modules of the prunable layers
        (Linear, ConvNd)
    """
    prunable = []
    for name, mod in module.named_modules():
        # QAT / GPT classes may be None on torch builds lacking them,
        # so each is guarded before the isinstance check
        quant_match = (
            (QATLinear and isinstance(mod, QATLinear))
            or (QATConv2d and isinstance(mod, QATConv2d))
            or (QATConv3d and isinstance(mod, QATConv3d))
            or (GPTConv1D and isinstance(mod, GPTConv1D))
        )
        if isinstance(mod, (Linear, _ConvNd)) or quant_match:
            prunable.append((name, mod))
    return prunable
21,364 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_quantizable_layers` function. Write a Python function `def get_quantizable_layers(module: Module) -> List[Tuple[str, Module]]` to solve the following problem:
:param module: the module to get the quantizable layers from :return: a list containing the names and modules of the quantizable layers (Linear, Conv2d, Conv3d)
Here is the function:
def get_quantizable_layers(module: Module) -> List[Tuple[str, Module]]:
    """
    :param module: the module to get the quantizable layers from
    :return: a list containing the names and modules of the quantizable layers
        (Linear, Conv2d, Conv3d)
    :raises ImportError: if the installed torch build has no QAT support
    """
    if QATLinear is None:
        raise ImportError(
            "PyTorch version is not setup for Quantization. "
            "Please install a QAT compatible version of PyTorch"
        )

    quantizable = []
    for name, mod in module.named_modules():
        # Conv3d is only quantizable when this torch build provides QATConv3d
        if isinstance(mod, (Linear, Conv2d)) or (
            QATConv3d and isinstance(mod, Conv3d)
        ):
            quantizable.append((name, mod))
    return quantizable
21,365 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_quantized_layers` function. Write a Python function `def get_quantized_layers(module: Module) -> List[Tuple[str, Module]]` to solve the following problem:
:param module: the module to get the quantized layers from :return: a list containing the names and modules of the quantized layers (Linear, Conv2d, Conv3d)
Here is the function:
def get_quantized_layers(module: Module) -> List[Tuple[str, Module]]:
    """
    :param module: the module to get the quantized layers from
    :return: a list containing the names and modules of the quantized layers
        (Linear, Conv2d, Conv3d)
    :raises ImportError: if the installed torch build has no QAT support
    """
    if QATLinear is None:
        raise ImportError(
            "PyTorch version is not setup for Quantization. "
            "Please install a QAT compatible version of PyTorch"
        )

    found = []
    for name, mod in module.named_modules():
        is_qat = (
            (QATLinear and isinstance(mod, QATLinear))
            or (QATConv2d and isinstance(mod, QATConv2d))
            or (QATConv3d and isinstance(mod, QATConv3d))
        )
        if is_qat:
            found.append((name, mod))
        elif isinstance(mod, Conv3d) and not QATConv3d:
            # Conv3d present but this torch build cannot quantize it
            warnings.warn(
                "Pytorch version is not setup for Conv3D Quantization. "
                "Quantization of Conv3D layers will be skipped",
                UserWarning,
            )
    return found
21,366 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
def get_layer(name: str, module: Module) -> Module:
    """
    :param name: the name of the layer to grab from the module,
        given as a dot-separated path (e.g. "features.0.conv")
    :param module: the module containing the layer to grab
    :return: the module representing the layer in the module
    """
    # an empty name refers to the module itself
    if not name:
        return module
    current = module
    # walk the dotted path one attribute at a time; Module.__getattr__
    # resolves submodules, parameters, and buffers by name
    for attr in name.split("."):
        current = current.__getattr__(attr)
    return current
The provided code snippet includes necessary dependencies for implementing the `get_layer_param` function. Write a Python function `def get_layer_param(param: str, layer: str, module: Module) -> Parameter` to solve the following problem:
:param param: the name of the param to grab from the layer :param layer: the name of the layer to grab from the module :param module: the module containing the layer and the param :return: the param taken from the given layer in the module
Here is the function:
def get_layer_param(param: str, layer: str, module: Module) -> Parameter:
    """
    :param param: the name of the param to grab from the layer
    :param layer: the name of the layer to grab from the module
    :param module: the module containing the layer and the param
    :return: the param taken from the given layer in the module
    """
    # resolve the owning layer first, then pull the named parameter off of it
    target_layer = get_layer(layer, module)  # type: Module
    return target_layer.__getattr__(param)
21,367 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
# lightweight record pairing a layer (and its full name in the model) with
# one of its parameters (and that parameter's name within the layer)
NamedLayerParam = namedtuple(
    "NamedLayerParam", ["layer_name", "layer", "param_name", "param"]
)
def get_layer(name: str, module: Module) -> Module:
    """
    :param name: the name of the layer to grab from the module,
        given as a dot-separated path (e.g. "features.0.conv")
    :param module: the module containing the layer to grab
    :return: the module representing the layer in the module
    """
    # an empty name refers to the root module itself
    if not name:
        return module
    layers = name.split(".")
    layer = module
    # walk the dotted path one attribute at a time; Module.__getattr__
    # resolves submodules, parameters, and buffers by name
    for name in layers:
        layer = layer.__getattr__(name)
    return layer
def any_str_or_regex_matches_param_name(
    param_name: str,
    name_or_regex_patterns: List[str],
) -> bool:
    """
    :param param_name: The name of a parameter
    :param name_or_regex_patterns: List of full param names to match to the input or
        regex patterns to match with that should be prefixed with 're:'
    :return: True if any given str or regex pattern matches the given name
    """
    for pattern in name_or_regex_patterns:
        if pattern[:3] == "re:":
            # regex entries anchor at the start of the parameter name
            if re.match(pattern[3:], param_name):
                return True
        elif param_name == pattern:
            return True
    return False
def validate_all_params_found(
    name_or_regex_patterns: List[str],
    found_param_names: List[str],
):
    """
    :param name_or_regex_patterns: List of full param names or regex patterns of them
        to check for matches in found_param_names names
    :param found_param_names: List of full parameter names to check for matches
    :raise RuntimeError: If there is a name or regex pattern that does not have a
        match in found_param_names
    """
    for name_or_regex in name_or_regex_patterns:
        is_regex = name_or_regex[:3] == "re:"
        # exact name matched in the list of full parameter names
        if not is_regex and name_or_regex in found_param_names:
            continue
        # regex pattern matched at least one full parameter name
        if is_regex and any(
            re.match(name_or_regex[3:], name) for name in found_param_names
        ):
            continue
        raise RuntimeError(
            "All supplied parameter names or regex patterns not found."
            "No match for {} in found parameters {}. \nSupplied {}".format(
                name_or_regex, found_param_names, name_or_regex_patterns
            )
        )
The provided code snippet includes necessary dependencies for implementing the `get_named_layers_and_params_by_regex` function. Write a Python function `def get_named_layers_and_params_by_regex( module: Module, param_names: List[str], params_strict: bool = False, ) -> List[NamedLayerParam]` to solve the following problem:
:param module: the module to get the matching layers and params from :param param_names: a list of names or regex patterns to match with full parameter paths. Regex patterns must be specified with the prefix 're:' :param params_strict: if True, this function will raise an exception if any name or regex in param_names matches no parameter :return: a list of NamedLayerParam tuples whose full parameter names in the given module match one of the given regex patterns or parameter names
Here is the function:
def get_named_layers_and_params_by_regex(
    module: Module,
    param_names: List[str],
    params_strict: bool = False,
) -> List[NamedLayerParam]:
    """
    :param module: the module to get the matching layers and params from
    :param param_names: a list of names or regex patterns to match with full parameter
        paths. Regex patterns must be specified with the prefix 're:'
    :param params_strict: if True, this function will raise an exception if any
        name or regex in param_names matches no parameter in the module
    :return: a list of NamedLayerParam tuples whose full parameter names in the given
        module match one of the given regex patterns or parameter names
    """
    named_layers_and_params = []
    found_param_names = []

    for layer_name, layer in module.named_modules():
        for param_name, param in layer.named_parameters():
            if "." in param_name:  # skip parameters of nested layers
                continue
            full_param_name = "{}.{}".format(layer_name, param_name)
            if any_str_or_regex_matches_param_name(full_param_name, param_names):
                named_layers_and_params.append(
                    NamedLayerParam(layer_name, layer, param_name, param)
                )
                found_param_names.append(full_param_name)
            elif layer_name.endswith(".module"):
                # unwrap layers wrapped with a QuantWrapper and check if they match
                # by also trying the name with the wrapper's ".module" hop removed
                parent_layer_name = ".".join(layer_name.split(".")[:-1])
                parent_layer = get_layer(parent_layer_name, module)
                skip_wrapper_name = "{}.{}".format(parent_layer_name, param_name)
                if (
                    QuantWrapper is not None
                    and isinstance(parent_layer, QuantWrapper)
                    and any_str_or_regex_matches_param_name(
                        skip_wrapper_name, param_names
                    )
                ):
                    named_layers_and_params.append(
                        NamedLayerParam(layer_name, layer, param_name, param)
                    )
                    # record the unwrapped name so strict validation sees the match
                    found_param_names.append(skip_wrapper_name)
    if params_strict:
        validate_all_params_found(param_names, found_param_names)

    return named_layers_and_params
21,368 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
try:
from torch.nn.qat import Conv3d as QATConv3d
except Exception as _err:
quant_conv3d_err = _err
QATConv3d = None
The provided code snippet includes necessary dependencies for implementing the `set_deterministic_seeds` function. Write a Python function `def set_deterministic_seeds(seed: int = 0)` to solve the following problem:
Manually seeds the numpy, random, and torch packages. Also sets torch.backends.cudnn.deterministic to True :param seed: the manual seed to use. Default is 0
Here is the function:
def set_deterministic_seeds(seed: int = 0):
    """
    Manually seeds the numpy, random, and torch packages.
    Also sets torch.backends.cudnn.deterministic to True

    :param seed: the manual seed to use. Default is 0
    """
    # seed every RNG source the training stack draws from
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    # force cuDNN to select deterministic kernels
    torch.backends.cudnn.deterministic = True
21,369 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
try:
from torch.nn.qat import Conv3d as QATConv3d
except Exception as _err:
quant_conv3d_err = _err
QATConv3d = None
_LOGGER = logging.getLogger(__name__)
# environment variable name; when set to "true", threshold lookups prefer the
# lower-memory torch.kthvalue path over fully sorting the tensor
MEMORY_BOUNDED = "MEMORY_BOUNDED"
The provided code snippet includes necessary dependencies for implementing the `memory_aware_threshold` function. Write a Python function `def memory_aware_threshold(tensor: torch.Tensor, idx: int) -> Tensor` to solve the following problem:
Finds a threshold at the lookup idx in the most efficient way with available resources. Will be phased out when GPU-memory overhead of torch.sort reduces, or when torch.kthvalue becomes faster than torch.sort. :param tensor: A tensor to find a k-th smallest value in, where k=idx+1 :param idx: A lookup index :return: k-th smallest value from the given tensor, where k=idx+1
Here is the function:
def memory_aware_threshold(tensor: torch.Tensor, idx: int) -> Tensor:
    """
    Finds a threshold at the lookup idx in the most efficient way with available
    resources. Will be phased out when GPU-memory overhead of torch.sort reduces,
    or when torch.kthvalue becomes faster than torch.sort.

    :param tensor: A tensor to find a k-th smallest value in, where k=idx+1
    :param idx: A lookup index
    :return: k-th smallest value from the given tensor, where k=idx+1
    """
    try:
        if (
            MEMORY_BOUNDED in os.environ
            and os.environ[MEMORY_BOUNDED].lower() == "true"
        ):
            # kthvalue avoids materializing a fully sorted copy of the tensor
            return torch.kthvalue(tensor.reshape(-1), idx + 1)[0]
        else:
            # sort is typically faster than kthvalue when memory is plentiful
            return torch.sort(tensor.reshape(-1))[0][idx]
    except RuntimeError:
        _LOGGER.warning(
            "Finding threshold from sparsity failed due to lack of memory, "
            "will attempt to recover. Consider setting env variable "
            f"{MEMORY_BOUNDED}=True in future runs."
        )
        torch.cuda.empty_cache()
        os.environ[MEMORY_BOUNDED] = "True"
        # bug fix: use reshape (not view) so the recovery path also works on
        # non-contiguous tensors; view(-1) raises RuntimeError on them, which
        # would defeat the recovery attempt
        return torch.kthvalue(tensor.reshape(-1), idx + 1)[0]
21,370 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
try:
from torch.nn.qat import Conv3d as QATConv3d
except Exception as _err:
quant_conv3d_err = _err
QATConv3d = None
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
def adjust_quantization_for_onnx_export(module: torch.nn.Module) -> None:
    """
    Clamp all FakeQuantize ranges in ``module`` to ranges exportable to ONNX.

    ONNX export only supports int8/uint8 quantization ranges, so any
    FakeQuantize submodule with another range is rewritten in place to the
    nearest signed (int8) or unsigned (uint8) range, and its observer is
    disabled so the artificial range is not re-estimated afterwards.

    Fix: the previous return annotation claimed ``torch.nn.Module`` although
    the function mutates ``module`` in place and returns nothing.

    :param module: the module whose FakeQuantize submodules are adjusted in place
    """
    # supported pytorch ranges are int8 or uint8
    allowed_ranges = [(0, 127), (0, 255), (-128, 127)]
    # match by class name so both torch.quantization and torch.ao variants hit
    fake_quant_modules = [
        m for m in module.modules() if m.__class__.__name__ == "FakeQuantize"
    ]

    if _PARSED_TORCH_VERSION >= version.parse("1.12"):
        for quant in fake_quant_modules:
            # original ranges preserved in quant.quant_min and quant.quant_max
            quant_range = (
                quant.activation_post_process.quant_min,
                quant.activation_post_process.quant_max,
            )
            if quant_range not in allowed_ranges:
                if quant_range[0] < 0:  # convert signed range to int8
                    quant.activation_post_process.quant_min = -128
                    quant.activation_post_process.quant_max = 127
                else:  # convert unsigned range to uint8
                    quant.activation_post_process.quant_min = 0
                    quant.activation_post_process.quant_max = 255
                # don't update observer since ranges are artificially modified
                quant.observer_enabled[0] = 0
    else:  # backwards compatibility for torch <= 1.11
        for quant in fake_quant_modules:
            quant_range = (quant.quant_min, quant.quant_max)
            if quant_range not in allowed_ranges:
                if quant_range[0] < 0:  # convert signed range to int8
                    quant.quant_min = -128
                    quant.quant_max = 127
                else:  # convert unsigned range to uint8
                    quant.quant_min = 0
                    quant.quant_max = 255
                # don't update observer since ranges are artificially modified
                quant.observer_enabled[0] = 0
21,371 | import logging
import os
import random
import re
import warnings
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy
import torch
from packaging import version
from torch import Tensor
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import Conv2d, Conv3d, _ConvNd
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.utils import create_dirs, save_numpy
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `get_dependency_order` function. Write a Python function `def get_dependency_order( layer: Module, subset: Dict, an_input: Tensor, **kwargs ) -> List[str]` to solve the following problem:
Get a list of a subset of modules in layer ordered by execution order, which honors the dependencies in the graph :param layer: pytorch module to calculate dependencies for :param subset: subset of modules in the layer to include in the ordering :param an_input: example input to pass through the layer forward pass, used to determine execution order :return: list of module names in execution order
Here is the function:
def get_dependency_order(
    layer: Module, subset: Dict, an_input: Tensor, **kwargs
) -> List[str]:
    """
    Get a list of a subset of modules in layer ordered by execution order, which honors
    the dependencies in the graph

    :param layer: pytorch module to calculate dependencies for
    :param subset: subset of modules in the layer to include in the ordering,
        as a mapping of name -> module
    :param an_input: example input to pass through the layer forward pass, used to
        determine execution order
    :param kwargs: additional keyword arguments forwarded to the layer's forward pass
    :return: list of module names in execution order
    """
    order = []

    def _record(name):
        # forward hooks fire in execution order; record only modules of interest
        def _hook(_, inp, out):
            if name in subset:
                order.append(name)

        return _hook

    # register a hook for each module of interest, triggered in execution order
    handles = [
        subset[name].register_forward_hook(_record(name)) for name in subset
    ]
    try:
        layer(an_input, **kwargs)
    finally:
        # bug fix: remove hooks even when the forward pass raises, so a failed
        # probe does not leave recording hooks attached to the model
        for handle in handles:
            handle.remove()
    return order
21,372 | from typing import Optional
import torch
from sparseml.pytorch.utils.logger import BaseLogger
from sparseml.pytorch.utils.sparsification_info.module_sparsification_info import (
ModuleSparsificationInfo,
)
class BaseLogger(ABC):
    """
    Base class that all modifier loggers must implement.

    :param name: name given to the logger, used for identification
    :param enabled: True to log, False otherwise
    """

    # NOTE(review): `name` and `enabled` below read like @property accessors
    # (with `enabled(self, value)` being the matching @enabled.setter); the
    # decorators appear to have been stripped during extraction — confirm
    # against the upstream source before relying on attribute-style access.

    def __init__(self, name: str, enabled: bool = True):
        self._name = name
        self._enabled = enabled

    def name(self) -> str:
        """
        :return: name given to the logger, used for identification
        """
        return self._name

    def enabled(self) -> bool:
        """
        :return: True to log, False otherwise
        """
        return self._enabled

    def enabled(self, value: bool):
        """
        :param value: True to log, False otherwise
        """
        self._enabled = value

    # the log_* methods below are no-op defaults: subclasses override the ones
    # they support and return True to signal the value was actually logged

    def log_hyperparams(self, params: Dict[str, float]) -> bool:
        """
        :param params: Each key-value pair in the dictionary is the name of the
            hyper parameter and it's corresponding value.
        :return: True if logged, False otherwise.
        """
        return False

    def log_scalar(
        self,
        tag: str,
        value: float,
        step: Optional[int] = None,
        wall_time: Optional[float] = None,
        **kwargs,
    ) -> bool:
        """
        :param tag: identifying tag to log the value with
        :param value: value to save
        :param step: global step for when the value was taken
        :param wall_time: global wall time for when the value was taken
        :param kwargs: additional logging arguments to support Python and custom loggers
        :return: True if logged, False otherwise.
        """
        return False

    def log_scalars(
        self,
        tag: str,
        values: Dict[str, float],
        step: Optional[int] = None,
        wall_time: Optional[float] = None,
        **kwargs,
    ) -> bool:
        """
        :param tag: identifying tag to log the values with
        :param values: values to save
        :param step: global step for when the values were taken
        :param wall_time: global wall time for when the values were taken
        :param kwargs: additional logging arguments to support Python and custom loggers
        :return: True if logged, False otherwise.
        """
        return False

    def log_string(
        self,
        tag: str,
        string: str,
        step: Optional[int] = None,
        wall_time: Optional[float] = None,
        **kwargs,
    ) -> bool:
        """
        :param tag: identifying tag to log the string with
        :param string: string value to save
        :param step: global step for when the value was taken
        :param wall_time: global wall time for when the value was taken
        :param kwargs: additional logging arguments to support Python and custom loggers
        :return: True if logged, False otherwise.
        """
        return False

    def save(
        self,
        file_path: str,
        **kwargs,
    ) -> bool:
        """
        :param file_path: path to a file to be saved
        :param kwargs: additional arguments that a specific logger might use
        :return: True if saved, False otherwise
        """
        return False
class ModuleSparsificationInfo(SparsificationInfo):
    """
    Pydantic model for storing sparsification information of a torch module.
    """

    # NOTE(review): these are pydantic Field declarations, and `from_module`
    # takes `cls` — presumably decorated with @classmethod upstream; the
    # decorator appears stripped in this extraction — confirm against source.

    summary_info: SparsificationSummaries = Field(
        description="Model that holds the sparsification summary info of the module"
    )
    pruning_info: SparsificationPruning = Field(
        description="Model that holds the pruning info of the module"
    )
    quantization_info: SparsificationQuantization = Field(
        description="Model that holds the quantization info of the module"
    )

    def from_module(cls, module: torch.nn.Module) -> "ModuleSparsificationInfo":
        """
        Factory method to create a ModuleSparsificationInfo object from a torch module.

        :param module: the module to create the ModuleSparsificationInfo object from
        :return: the ModuleSparsificationInfo object created from the module
        :raises ValueError: if the argument is not a torch.nn.Module
        """
        if not isinstance(module, torch.nn.Module):
            raise ValueError(
                "Module must be a torch.nn.Module, not {}".format(type(module))
            )
        return cls(
            summary_info=SparsificationSummaries.from_module(module),
            pruning_info=SparsificationPruning.from_module(module),
            quantization_info=SparsificationQuantization.from_module(module),
        )

    def loggable_items(self, **kwargs) -> Generator[Tuple[str, Any], None, None]:
        """
        A generator that yields the loggable items of
        the ModuleSparsificationInfo object.

        :param kwargs: additional kwargs to pass to the loggable items
        :return a generator that yields a tuple of:
            - the name of the loggable item
            - the value of the loggable item
        """
        # delegate: each sub-info model emits its own (tag, value) pairs
        for info in [self.summary_info, self.pruning_info, self.quantization_info]:
            yield from info.loggable_items(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `log_module_sparsification_info` function. Write a Python function `def log_module_sparsification_info( module: torch.nn.Module, logger: BaseLogger, step: Optional[float] = None )` to solve the following problem:
Log the sparsification information for the given module to the given logger :param module: the module to log the sparsification information for :param logger: the logger to log the sparsification information to :param step: the global step for when the sparsification information is being logged. By default, is None
Here is the function:
def log_module_sparsification_info(
    module: torch.nn.Module, logger: BaseLogger, step: Optional[float] = None
):
    """
    Log the sparsification information for the given module to the given logger

    :param module: the module to log the sparsification information for
    :param logger: the logger to log the sparsification information to
    :param step: the global step for when the sparsification information
        is being logged. By default, is None
    """
    sparsification_info = ModuleSparsificationInfo.from_module(module)
    for tag, value in sparsification_info.loggable_items():
        # a dict of related scalars is logged as one grouped entry;
        # everything else is logged as a single scalar
        if isinstance(value, dict):
            logger.log_scalars(tag=tag, step=step, values=value)
            continue
        logger.log_scalar(tag=tag, step=step, value=value)
21,373 | from typing import List, Optional, Union
import torch
from torch.nn.modules.linear import Identity
from torch.quantization import QuantWrapper
The provided code snippet includes necessary dependencies for implementing the `get_leaf_operations` function. Write a Python function `def get_leaf_operations( model: torch.nn.Module, operations_to_skip: Optional[List[torch.nn.Module]] = None, operations_to_unwrap: Optional[List[torch.nn.Module]] = None, ) -> List[torch.nn.Module]` to solve the following problem:
Get the leaf operations in the model (those that do not have operations as children) :param model: the model to get the leaf operations from :param operations_to_skip: a list of leaf operations that will be omitted when getting the leaf operations. If None passed, by default the Identity operation will be skipped :param operations_to_unwrap: a list of operations that will be unwrapped when getting the leaf operations. Unwrapping means that we directly add the module(s) that is/are wrapped by the operation (i.e. operation's `module` attribute) to the list of leaf operations. If None passed, by default the QuantWrapper operation will be unwrapped :return: a list of the leaf operations
Here is the function:
def get_leaf_operations(
    model: torch.nn.Module,
    operations_to_skip: Optional[List[torch.nn.Module]] = None,
    operations_to_unwrap: Optional[List[torch.nn.Module]] = None,
) -> List[torch.nn.Module]:
    """
    Get the leaf operations in the model
    (those that do not have operations as children)

    :param model: the model to get the leaf operations from
    :param operations_to_skip: a list of leaf operation types that will be
        omitted when getting the leaf operations. If None passed, by default
        the Identity operation will be skipped
    :param operations_to_unwrap: a list of operation types that will be
        unwrapped when getting the leaf operations. Unwrapping means that we
        directly add the module that is wrapped by the operation (i.e. the
        operation's ``module`` attribute) to the list of leaf operations.
        If None passed, by default the QuantWrapper operation will be unwrapped
    :return: a list of the leaf operations. Note: if ``model`` itself has no
        children, the model itself is returned (not a list); this quirk is
        preserved for backward compatibility and for the recursion base case
    """
    if operations_to_skip is None:
        operations_to_skip = [Identity]
    if operations_to_unwrap is None:
        operations_to_unwrap = [QuantWrapper]

    children = list(model.children())
    if not children:
        # base case: the model is itself a leaf operation
        return model

    leaf_operations = []
    for child in children:
        if isinstance(child, tuple(operations_to_unwrap)):
            # add the wrapped module directly instead of recursing into
            # the wrapper itself
            leaf_operations.append(child.module)
            continue
        # propagate the skip/unwrap configuration down the recursion
        # (bug fix: previously the defaults were silently reinstated at
        # every recursive level, ignoring caller-supplied lists)
        result = get_leaf_operations(child, operations_to_skip, operations_to_unwrap)
        if isinstance(result, list):
            leaf_operations.extend(result)
        else:
            # recursion hit a leaf module, which is returned bare
            leaf_operations.append(result)
    return [
        op for op in leaf_operations if not isinstance(op, tuple(operations_to_skip))
    ]
21,374 | from typing import List, Optional, Union
import torch
from torch.nn.modules.linear import Identity
from torch.quantization import QuantWrapper
The provided code snippet includes necessary dependencies for implementing the `is_quantized` function. Write a Python function `def is_quantized(operation: torch.nn.Module) -> bool` to solve the following problem:
Check whether the operation is quantized (contains a quantization scheme)
Here is the function:
def is_quantized(operation: torch.nn.Module) -> bool:
    """
    Check whether the operation carries a quantization scheme
    (i.e. exposes a ``quantization_scheme`` attribute).

    :param operation: the operation to inspect
    :return: True if the operation is quantized, False otherwise
    """
    try:
        operation.quantization_scheme
    except AttributeError:
        return False
    return True
21,375 | from typing import List, Optional, Union
import torch
from torch.nn.modules.linear import Identity
from torch.quantization import QuantWrapper
def _get_num_bits(dtype: torch.dtype) -> int:
# Get the number of bits of a torch dtype
if dtype == torch.float16 or dtype == torch.bfloat16:
return 16
elif dtype == torch.float32:
return 32
elif dtype == torch.float64:
return 64
elif dtype == torch.int8:
return 8
elif dtype == torch.int16:
return 16
elif dtype == torch.int32:
return 32
elif dtype == torch.int64:
return 64
else:
raise ValueError("Unknown dtype: {}".format(dtype))
The provided code snippet includes necessary dependencies for implementing the `get_precision_information` function. Write a Python function `def get_precision_information( operation: torch.nn.Module, ) -> Union[None, int, "QuantizationScheme"]` to solve the following problem:
Get the information about the precision of the operation. 1) If operation is quantized, returns the quantization scheme of the operation. 2) If operation is not quantized, returns the number of bits of the operation's weights. 3) If operation is not quantized and does not have weights, returns None. :param operation: the operation to get the quantization scheme from :return: the quantization scheme of the operation, the number of bits of the operation's weights, or None if the operation is not quantized and does not have a weight
Here is the function:
def get_precision_information(
    operation: torch.nn.Module,
) -> Union[None, int, "QuantizationScheme"]:  # noqa F821
    """
    Get the information about the precision of the operation.
    1) If the operation is quantized, return its quantization scheme.
    2) Otherwise, if it has a weight, return the number of bits of the
       weight's dtype.
    3) Otherwise, return None.

    :param operation: the operation to get the precision information from
    :return: the quantization scheme of the operation, the number of bits
        of the operation's weight, or None if the operation is not quantized
        and has no weight
    """
    if hasattr(operation, "quantization_scheme"):
        return operation.quantization_scheme
    if hasattr(operation, "weight"):
        return _get_num_bits(operation.weight.dtype)
    return None
21,376 | from typing import Iterable, List, Tuple
import torch
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `get_output_grid_shapes` function. Write a Python function `def get_output_grid_shapes(outputs: List[Tensor]) -> List[Tensor]` to solve the following problem:
:param outputs: List of Yolo model outputs :return: A list of the grid dimensions for each of the Yolo outputs
Here is the function:
def get_output_grid_shapes(outputs: List[Tensor]) -> List[Tensor]:
    """
    :param outputs: List of Yolo model outputs
    :return: A list of the grid dimensions for each of the Yolo outputs
    """
    grid_shapes = []
    for output in outputs:
        # the spatial grid dims live at shape positions 2 and 3
        grid_shapes.append(Tensor(list(output.shape[2:4])))
    return grid_shapes
21,377 | from typing import Iterable, List, Tuple
import torch
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `yolo_v3_anchor_groups` function. Write a Python function `def yolo_v3_anchor_groups() -> List[Tensor]` to solve the following problem:
:return: List of the default anchor coordinate groups for Yolo V3 outputs
Here is the function:
def yolo_v3_anchor_groups() -> List[Tensor]:
    """
    :return: List of the default anchor coordinate groups for Yolo V3 outputs
    """
    large_objects = Tensor([[116, 90], [156, 198], [373, 326]])
    medium_objects = Tensor([[30, 61], [62, 45], [59, 119]])
    small_objects = Tensor([[10, 13], [16, 30], [33, 23]])
    return [large_objects, medium_objects, small_objects]
21,378 | from typing import Iterable, List, Tuple
import torch
from torch import Tensor
def _width_height_iou(wh_a: Tensor, wh_b: Tensor) -> Tensor:
# [n,2], [m,2] -> [n,m]
wh_a = wh_a.unsqueeze(1)
wh_b = wh_b.unsqueeze(0)
area_a = wh_a.prod(2)
area_b = wh_b.prod(2)
intersection = torch.min(wh_a, wh_b).prod(2)
return intersection / (area_a + area_b - intersection)
The provided code snippet includes necessary dependencies for implementing the `build_targets` function. Write a Python function `def build_targets( targets: Tensor, anchors_groups: List[Tensor], grid_shapes: List[Tensor], iou_threshold: float = 0.2, ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]` to solve the following problem:
Returns a representation of the image targets according to the given anchor groups and grid shapes. :param targets: Yolo data targets tensor of shape n,6 with columns image number, class, center_x, center_y, width, height :param anchors_groups: List of n,2 Tensors of anchor point coordinates for each of the Yolo model's detectors :param grid_shapes: List of n,2 Tensors of the Yolo models output grid shapes for a particular input shape :param iou_threshold: the minimum IoU value to consider an object box to match to an anchor point. Default is 0.2 :return:
Here is the function:
def build_targets(
    targets: Tensor,
    anchors_groups: List[Tensor],
    grid_shapes: List[Tensor],
    iou_threshold: float = 0.2,
) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]:
    """
    Returns a representation of the image targets according to the given
    anchor groups and grid shapes.

    :param targets: Yolo data targets tensor of shape n,6 with columns image number,
        class, center_x, center_y, width, height
    :param anchors_groups: List of n,2 Tensors of anchor point coordinates for
        each of the Yolo model's detectors
    :param grid_shapes: List of n,2 Tensors of the Yolo models output grid shapes
        for a particular input shape
    :param iou_threshold: the minimum IoU value to consider an object box to match
        to an anchor point. Default is 0.2
    :return: a tuple of four per-detector lists: (classes, boxes, indices,
        target_anchors) where classes holds matched class ids, boxes holds the
        xy offsets within the matched grid cell concatenated with grid-scaled
        width/height, indices holds (image, anchor, grid_x, grid_y) index
        tuples, and target_anchors holds the anchor coordinates matched to
        each target
    """
    num_targets = targets.shape[0]
    num_anchors = len(anchors_groups[0])
    classes, boxes, indices, target_anchors = [], [], [], []
    # copy targets once per anchor and append the anchor index as a 7th column
    anchor_indices = (
        torch.arange(num_anchors, device=targets.device)
        .float()
        .view(num_anchors, 1)
        .repeat(1, num_targets)
    )
    targets = torch.cat(
        (targets.repeat(num_anchors, 1, 1), anchor_indices[:, :, None]), 2
    )
    # candidate neighbor-cell offsets: none, j (left), k (up), l (right), m (down)
    offset_bias = 0.5
    offset_values = (
        torch.tensor(
            [[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]],  # None, j,k,l,m
            device=targets.device,
        ).float()
        * offset_bias
    )  # offsets
    # columns 2:6 (xywh) get scaled into grid space; the rest stay unscaled
    grid_scale = torch.ones(7, device=targets.device)  # tensor for grid space scaling
    for idx, anchors in enumerate(anchors_groups):
        # scale targets to current grid
        anchors = anchors.to(targets.device)
        grid_scale[2:6] = torch.tensor(grid_shapes[idx])[[0, 1, 0, 1]]
        scaled_targets = targets * grid_scale
        if num_targets:
            # mask out anchor/target pairs whose width-height IoU is too low
            wh_iou_mask = (
                _width_height_iou(anchors, scaled_targets[0, :, 4:6]) > iou_threshold
            )
            scaled_targets = scaled_targets[wh_iou_mask]
            # duplicate each target into the neighbor cells whose boundary it
            # is close to (within offset_bias), to compensate for grid
            # index rounding
            targets_xy = scaled_targets[:, 2:4]
            targets_xy_inv = grid_scale[[2, 3]] - targets_xy
            j, k = ((targets_xy % 1.0 < offset_bias) & (targets_xy > 1.0)).t()
            l, m = ((targets_xy_inv % 1.0 < offset_bias) & (targets_xy_inv > 1.0)).t()
            offset_filter = torch.stack((torch.ones_like(j), j, k, l, m))
            scaled_targets = scaled_targets.repeat((5, 1, 1))[offset_filter]
            offsets = (torch.zeros_like(targets_xy)[None] + offset_values[:, None])[
                offset_filter
            ]
        else:
            # no targets: keep the empty per-anchor slice, zero offsets
            scaled_targets = targets[0]
            offsets = 0
        # unpack the 7 columns: image id, class, xy, wh, anchor index
        image, clazz = scaled_targets[:, :2].long().t()
        targets_xy = scaled_targets[:, 2:4]
        targets_wh = scaled_targets[:, 4:6]
        grid_indices = (targets_xy - offsets).long()
        grid_x, grid_y = grid_indices.t()
        anchor_idxs = scaled_targets[:, 6].long()
        indices.append((image, anchor_idxs, grid_x, grid_y))
        # box = (offset of center within cell, grid-scaled width/height)
        boxes.append(torch.cat((targets_xy - grid_indices.float(), targets_wh), 1))
        target_anchors.append(anchors[anchor_idxs])
        classes.append(clazz)
    return classes, boxes, indices, target_anchors
21,379 | from typing import Iterable, List, Tuple
import torch
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `box_giou` function. Write a Python function `def box_giou(boxes_a: Tensor, boxes_b: Tensor) -> Tensor` to solve the following problem:
:param boxes_a: 4,N Tensor of xywh bounding boxes :param boxes_b: 4,N Tensor of xywh bounding boxes :return: Shape N Tensor of GIoU values between boxes in the input tensors
Here is the function:
def box_giou(boxes_a: Tensor, boxes_b: Tensor) -> Tensor:
    """
    Generalized IoU between two aligned sets of boxes.

    :param boxes_a: 4,N Tensor of xywh bounding boxes
    :param boxes_b: 4,N Tensor of xywh bounding boxes
    :return: Shape N Tensor of GIoU values between boxes in the input tensors
    """
    # convert centers/sizes to corner (ltrb) coordinates
    a_left = boxes_a[0] - boxes_a[2] / 2.0
    a_top = boxes_a[1] - boxes_a[3] / 2.0
    a_right = boxes_a[0] + boxes_a[2] / 2.0
    a_bottom = boxes_a[1] + boxes_a[3] / 2.0
    b_left = boxes_b[0] - boxes_b[2] / 2.0
    b_top = boxes_b[1] - boxes_b[3] / 2.0
    b_right = boxes_b[0] + boxes_b[2] / 2.0
    b_bottom = boxes_b[1] + boxes_b[3] / 2.0
    # intersection and union (epsilon keeps the division finite)
    overlap_w = (torch.min(a_right, b_right) - torch.max(a_left, b_left)).clamp(0)
    overlap_h = (torch.min(a_bottom, b_bottom) - torch.max(a_top, b_top)).clamp(0)
    inter = overlap_w * overlap_h
    union = boxes_a[2] * boxes_a[3] + boxes_b[2] * boxes_b[3] - inter + 1e-9
    iou = inter / union
    # smallest enclosing (convex) box area
    hull_w = torch.max(a_right, b_right) - torch.min(a_left, b_left)
    hull_h = torch.max(a_bottom, b_bottom) - torch.min(a_top, b_top)
    hull_area = hull_w * hull_h + 1e-9
    # GIoU = IoU - (hull - union) / hull
    return iou - (hull_area - union) / hull_area
21,380 | from typing import Iterable, List, Tuple
import torch
from torch import Tensor
class YoloGrids(object):
    """
    Helper class to compute and store Yolo output and anchor box grids

    :param anchor_groups: List of n,2 tensors of the Yolo model's anchor points
        for each output group. Defaults to yolo_v3_anchor_groups
    """

    def __init__(self, anchor_groups: List[Tensor] = None):
        self._grids = {}  # cache: (size_x, size_y) -> output box grid
        anchor_groups = anchor_groups or yolo_v3_anchor_groups()
        # bug fix: build the anchor grids from the resolved anchor_groups;
        # previously yolo_v3_anchor_groups() was always used here, silently
        # discarding a caller-supplied anchor_groups argument
        self._anchor_grids = [
            t.clone().view(1, -1, 1, 1, 2) for t in anchor_groups
        ]

    def get_grid(self, size_x: int, size_y: int) -> Tensor:
        """
        :param size_x: grid size x
        :param size_y: grid size y
        :return: Yolo output box grid for size x,y to be used for model output
            decoding. Will have shape (1, 1, size_y, size_x, 2). Grids are
            computed once per shape and cached
        """
        grid_shape = (size_x, size_y)
        if grid_shape not in self._grids:
            coords_y, coords_x = torch.meshgrid(
                [torch.arange(size_y), torch.arange(size_x)]
            )
            # stack as (x, y) pairs per cell
            grid = torch.stack((coords_x, coords_y), 2)
            self._grids[grid_shape] = grid.view(1, 1, size_y, size_x, 2)
        return self._grids[grid_shape]

    def get_anchor_grid(self, group_idx: int) -> Tensor:
        """
        :param group_idx: Index of output group for this anchor grid
        :return: grid tensor of shape 1, num_anchors, 1, 1, 2
        """
        return self._anchor_grids[group_idx]

    def num_anchor_grids(self) -> int:
        """
        :return: The number of anchor grids available (number of yolo model outputs)
        """
        return len(self._anchor_grids)
def _xywh_to_ltrb(boxes, in_place: bool = False):
if not in_place:
boxes = boxes.clone()
boxes[:, 0], boxes[:, 2] = ( # ltrb x
boxes[:, 0] - boxes[:, 2] / 2.0,
boxes[:, 0] + boxes[:, 2] / 2.0,
)
boxes[:, 1], boxes[:, 3] = ( # ltrb y
boxes[:, 1] - boxes[:, 3] / 2.0,
boxes[:, 1] + boxes[:, 3] / 2.0,
)
return boxes
The provided code snippet includes necessary dependencies for implementing the `postprocess_yolo` function. Write a Python function `def postprocess_yolo( preds: List[Tensor], input_shape: Iterable[int], yolo_grids: YoloGrids = None, confidence_threshold: float = 0.1, iou_threshold: float = 0.6, max_detections: int = 300, ) -> List[Tuple[Tensor, Tensor, Tensor]]` to solve the following problem:
Decode the outputs of a Yolo model and perform non maximum suppression on the predicted boxes. :param preds: list of Yolo model output tensors :param input_shape: shape of input image to model. Default is [640, 640] :param yolo_grids: optional YoloGrids object for caching previously used grid shapes :param confidence_threshold: minimum confidence score for a prediction to be considered a detection. Default is 0.1 :param iou_threshold: IoU threshold for non maximum suppression. Default is 0.6 :param max_detections: maximum number of detections after nms. Default is 300 :return: List of predicted bounding boxes (n,4), labels, and scores for each output in the batch
Here is the function:
def postprocess_yolo(
    preds: List[Tensor],
    input_shape: Iterable[int],
    yolo_grids: YoloGrids = None,
    confidence_threshold: float = 0.1,
    iou_threshold: float = 0.6,
    max_detections: int = 300,
) -> List[Tuple[Tensor, Tensor, Tensor]]:
    """
    Decode the outputs of a Yolo model and perform non maximum suppression
    on the predicted boxes.

    :param preds: list of Yolo model output tensors
    :param input_shape: shape (x, y) of the input image to the model,
        e.g. (640, 640)
    :param yolo_grids: optional YoloGrids object for caching previously used grid
        shapes
    :param confidence_threshold: minimum confidence score for a prediction to be
        considered a detection. Default is 0.1
    :param iou_threshold: IoU threshold for non maximum suppression. Default is 0.6
    :param max_detections: maximum number of detections after nms. Default is 300
    :return: List of predicted bounding boxes (n,4), labels, and scores for each
        output in the batch
    """
    if batched_nms is None:
        # presumably batched_nms is set to None when the torchvision import
        # fails at module load -- confirm against the module-level imports
        raise RuntimeError(
            "Unable to import batched_nms from torchvision.ops try upgrading your"
            " torch and torchvision versions"
        )
    yolo_grids = yolo_grids or YoloGrids()
    # decode each of the model output grids then concatenate
    outputs = []
    for idx, pred in enumerate(preds):
        pred = pred.sigmoid()
        # build grid and calculate stride
        grid_shape = pred.shape[2:4]
        grid = yolo_grids.get_grid(*grid_shape)
        anchor_grid = yolo_grids.get_anchor_grid(idx)
        # NOTE(review): mixes input_shape[0] with grid dim 0 -- appears to
        # assume square inputs/grids; confirm for non-square shapes
        stride = input_shape[0] / grid_shape[0]
        # decode xywh box values into input-image pixel space
        pred[..., 0:2] = (pred[..., 0:2] * 2.0 - 0.5 + grid) * stride
        pred[..., 2:4] = (pred[..., 2:4] * 2) ** 2 * anchor_grid
        # flatten anchor and grid dimensions -> (bs, num_predictions, num_classes + 5)
        outputs.append(pred.view(pred.size(0), -1, pred.size(-1)))
    outputs = torch.cat(outputs, 1)
    # perform nms on each image in batch
    nms_outputs = []
    for image_idx, output in enumerate(outputs):
        # filter out low confidence predictions (column 4 is objectness)
        confidence_mask = output[..., 4] > confidence_threshold
        output = output[confidence_mask]
        if output.size(0) == 0:  # no predictions, return empty tensor
            nms_outputs.append(torch.empty(0, 6))
            continue
        # scale class confidences by object confidence, convert to ltrb
        output[:, 5:] *= output[:, 4:5]
        _xywh_to_ltrb(output[:, :4], in_place=True)
        # attach labels of all positive predictions; a box may yield one row
        # per class whose scaled confidence clears the threshold
        class_confidence_mask = output[:, 5:] > confidence_threshold
        pred_idxs, class_idxs = class_confidence_mask.nonzero(as_tuple=False).t()
        output = torch.cat(
            [
                output[pred_idxs, :4],
                output[pred_idxs, class_idxs + 5].unsqueeze(-1),
                class_idxs.float().unsqueeze(-1),
            ],
            1,
        )
        if output.size(0) == 0:  # no predictions, return empty tensor
            nms_outputs.append(torch.empty(0, 6))
            continue
        # run nms per class (batched_nms keeps classes separate)
        nms_filter = batched_nms(  # boxes, scores, labels, threshold
            output[:, :4], output[:, 4], output[:, 5], iou_threshold
        )
        if nms_filter.size(0) > max_detections:
            # batched_nms returns indices sorted by decreasing score, so
            # truncation keeps the highest-scoring detections
            nms_filter = nms_filter[:max_detections]
        output = output[nms_filter]
        # extract outputs, rescale boxes to [0, 1]
        boxes = output[:, :4]
        boxes[:, [0, 2]] /= input_shape[0]  # scale x
        boxes[:, [1, 3]] /= input_shape[1]  # scale y
        labels = output[:, 5].long()
        scores = output[:, 4]
        nms_outputs.append((boxes, labels, scores))
    return nms_outputs
21,381 | import itertools
import math
import random
from collections import defaultdict
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import numpy
import torch
from PIL import Image
from torch import Tensor
class DefaultBoxes(object):
    """
    Convenience class for creating, representing, encoding, and decoding default boxes

    :param image_size: input image size
    :param feature_maps: list of feature map sizes
    :param steps: steps to use between boxes in a feature map
    :param scales: list of ranges of size scales to use for each feature map
    :param aspect_ratios: list of aspect ratios to construct boxes with
    :param scale_xy: parameter to scale box center by when encoding
    :param scale_wh: parameter to scale box dimensions by when encoding
    """

    def __init__(
        self,
        image_size: int,
        feature_maps: List[int],
        steps: List[int],
        scales: List[int],
        aspect_ratios: List[List[int]],
        scale_xy: float = 0.1,
        scale_wh: float = 0.2,
    ):
        self._feature_maps = feature_maps
        self._image_size = image_size
        self._scale_xy = scale_xy
        self._scale_wh = scale_wh
        # According to https://github.com/weiliu89/caffe
        # Calculation method slightly different from paper
        self._steps = steps
        self._scales = scales
        self._aspect_ratios = aspect_ratios
        self._default_boxes = self._get_default_boxes()
        self._default_boxes_ltrb = self._get_default_boxes_ltrb()

    def _get_default_boxes(self) -> Tensor:
        # build every (cx, cy, w, h) default box, normalized and clamped to [0, 1]
        default_boxes = []
        feature_steps = self._image_size / numpy.array(self._steps)
        # size of feature and number of feature
        for idx, feature_map_size in enumerate(self._feature_maps):
            # unpack scales
            min_scale, max_scale = self._scales[idx]
            # set scales to range based on image size
            min_scale = min_scale / self._image_size
            max_scale = max_scale / self._image_size
            mid_scale = math.sqrt(min_scale * max_scale)
            all_sizes = [(min_scale, min_scale), (mid_scale, mid_scale)]
            for alpha in self._aspect_ratios[idx]:
                w = min_scale * math.sqrt(alpha)
                h = min_scale / math.sqrt(alpha)
                all_sizes.append((w, h))
                all_sizes.append((h, w))
            for w, h in all_sizes:
                for i, j in itertools.product(range(feature_map_size), repeat=2):
                    cx = (j + 0.5) / feature_steps[idx]
                    cy = (i + 0.5) / feature_steps[idx]
                    default_boxes.append((cx, cy, w, h))
        default_boxes = torch.tensor(default_boxes, dtype=torch.float)
        default_boxes.clamp_(min=0, max=1)
        return default_boxes

    def _get_default_boxes_ltrb(self) -> Tensor:
        # ltrb representation of the same boxes, used for IoU calculation
        default_boxes_ltrb = self._default_boxes.clone()
        default_boxes_ltrb[:, 0] = (
            self._default_boxes[:, 0] - 0.5 * self._default_boxes[:, 2]
        )
        default_boxes_ltrb[:, 1] = (
            self._default_boxes[:, 1] - 0.5 * self._default_boxes[:, 3]
        )
        default_boxes_ltrb[:, 2] = (
            self._default_boxes[:, 0] + 0.5 * self._default_boxes[:, 2]
        )
        default_boxes_ltrb[:, 3] = (
            self._default_boxes[:, 1] + 0.5 * self._default_boxes[:, 3]
        )
        return default_boxes_ltrb

    # bug fix: the three accessors below were plain methods, but the rest of
    # the class uses them as attributes (e.g. ``1.0 / self.scale_xy`` and
    # ``torch.zeros(4, self.num_default_boxes)``), which raised TypeErrors at
    # runtime; declaring them as properties makes those uses valid

    @property
    def scale_xy(self) -> float:
        """
        :return: parameter to scale box center by when encoding
        """
        return self._scale_xy

    @property
    def scale_wh(self) -> float:
        """
        :return: parameter to scale box dimensions by when encoding
        """
        return self._scale_wh

    @property
    def num_default_boxes(self) -> int:
        """
        :return: the number of default boxes this object defines
        """
        return self._default_boxes.size(0)

    def as_ltrb(self) -> Tensor:
        """
        :return: The default boxes represented by this object in
            left, top, right, bottom pixel representation
        """
        return self._default_boxes_ltrb

    def as_xywh(self) -> Tensor:
        """
        :return: The default boxes represented by this object in
            center pixel, width, height representation
        """
        return self._default_boxes

    def encode_image_box_labels(
        self, boxes: Tensor, labels: Tensor, threshold: float = 0.5
    ) -> Tuple[Tensor, Tensor]:
        """
        Given the bounding box and image annotations for a single image with N objects
        will encode the box annotations as offsets to the default boxes and labels
        to the associated default boxes based on the annotation boxes and default
        boxes with an intersection over union (IoU) greater than the given threshold.

        :param boxes: Bounding box annotations for objects in an image. Should have
            shape N,4 and be represented in ltrb format
        :param labels: Label annotations for N objects in an image.
        :param threshold: The minimum IoU bounding boxes and default boxes should share
            to be encoded
        :return: A tuple of the offset encoded bounding boxes and default box encoded
            labels
        """
        if box_iou is None:
            # presumably box_iou is set to None when the torchvision import
            # fails at module load -- confirm against the module-level imports
            raise RuntimeError(
                "Unable to import box_iou from torchvision.ops try upgrading your"
                " torch and torchvision versions"
            )
        if labels.numel() == 0:
            # no annotations: return all-zero offsets and all-background labels
            boxes_encoded = torch.zeros(4, self.num_default_boxes).float()
            labels_encoded = torch.zeros(self.num_default_boxes, dtype=torch.long)
            return boxes_encoded, labels_encoded

        ious = box_iou(boxes, self._default_boxes_ltrb)  # N,num_default_box
        # Ensure that at least one default box is encoded for each annotation
        best_dbox_ious, best_dbox_idx = ious.max(dim=0)  # best IoU for each default box
        best_bbox_ious, best_bbox_idx = ious.max(dim=1)  # best IoU for each label box
        best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
        idx = torch.arange(0, best_bbox_idx.size(0))
        best_dbox_idx[best_bbox_idx[idx]] = idx

        # filter default boxes by IoU threshold
        labels_encoded = torch.zeros(self.num_default_boxes, dtype=torch.long)
        boxes_masked = self._default_boxes_ltrb.clone()
        threshold_mask = best_dbox_ious > threshold
        labels_encoded[threshold_mask] = labels[best_dbox_idx[threshold_mask]].long()
        boxes_masked[threshold_mask, :] = boxes[best_dbox_idx[threshold_mask], :]
        _ltrb_to_xywh(boxes_masked)  # convert to xywh format in place

        # encode masked boxes as offset tensor
        # NOTE(review): the xy offsets are normalized by the default-box
        # centers ([:, :2]); the standard SSD encoding divides by the box
        # widths/heights ([:, 2:]). decode_output_batch mirrors this, so
        # round-trips are self-consistent -- confirm against the intended
        # SSD formulation
        xy_encoded = (
            (1.0 / self.scale_xy)
            * (boxes_masked[:, :2] - self._default_boxes[:, :2])
            / self._default_boxes[:, :2]
        )
        wh_encoded = (1.0 / self.scale_wh) * (
            boxes_masked[:, 2:] / self._default_boxes[:, 2:]
        ).log()
        boxes_encoded = torch.cat(
            (xy_encoded, wh_encoded), dim=1
        )  # shape: num_default_boxes, 4
        boxes_encoded = boxes_encoded.transpose(
            0, 1
        ).contiguous()  # final shape: 4, num_default_boxes

        return boxes_encoded.float(), labels_encoded.long()

    def decode_output_batch(
        self,
        boxes: Tensor,
        scores: Tensor,
        score_threhsold: float = 0.01,
        iou_threshold: float = 0.45,
        max_detections: int = 200,
    ) -> List[Tuple[Tensor, Tensor, Tensor]]:
        """
        Decodes a batch detection model outputs from default box offsets and class
        scores to ltrb formatted bounding boxes, predicted labels, and scores
        for each image of the batch using non maximum suppression.

        :param boxes: Encoded default-box offsets. Expected shape:
            batch_size,4,num_default_boxes
        :param scores: Class scores for each image, class, box combination.
            Expected shape: batch_size,num_classes,num_default_boxes
        :param score_threhsold: minimum softmax score to be considered a positive
            prediction. Default is 0.01 following the SSD paper. (Parameter name
            typo kept for API compatibility)
        :param iou_threshold: The minimum IoU between two boxes to be considered the
            same object in non maximum suppression
        :param max_detections: the maximum number of detections to keep per image.
            Default is 200
        :return: Detected object bounding boxes, predicted labels, and class score
            for each image in this batch
        """
        if batched_nms is None:
            # presumably batched_nms is set to None when the torchvision
            # import fails at module load -- confirm against module imports
            raise RuntimeError(
                "Unable to import batched_nms from torchvision.ops try upgrading your"
                " torch and torchvision versions"
            )
        # Re-order so that dimensions are batch_size,num_default_boxes,{4,num_classes}
        boxes = boxes.permute(0, 2, 1)
        scores = scores.permute(0, 2, 1)

        # convert box offsets to bounding boxes and convert to ltrb form
        default_boxes = self._default_boxes.unsqueeze(0)  # extra dim for math ops
        # NOTE(review): xy decoding multiplies by the default-box centers
        # ([:, :, :2]), mirroring the non-standard normalization used in
        # encode_image_box_labels
        boxes[:, :, :2] = (
            self.scale_xy * boxes[:, :, :2] * default_boxes[:, :, :2]
            + default_boxes[:, :, :2]
        )
        boxes[:, :, 2:] = (self._scale_wh * boxes[:, :, 2:]).exp() * default_boxes[
            :, :, 2:
        ]
        _xywh_to_ltrb_batch(boxes)

        # take softmax of class scores
        scores = torch.nn.functional.softmax(scores, dim=-1)  # class dimension

        # run non max suppression for each image in the batch and store outputs
        detection_outputs = []
        for image_boxes, box_class_scores in zip(boxes.split(1, 0), scores.split(1, 0)):
            # strip batch dimension
            image_boxes = image_boxes.squeeze(0)
            box_class_scores = box_class_scores.squeeze(0)

            # get highest score per box and filter out background class (index 0)
            box_class_scores[:, 0] = 0
            box_scores, box_labels = box_class_scores.max(dim=1)
            background_filter = box_scores > score_threhsold
            image_boxes = image_boxes[background_filter]
            box_scores = box_scores[background_filter]
            box_labels = box_labels[background_filter]

            if image_boxes.dim() == 0:
                # nothing predicted, add empty result and continue
                detection_outputs.append(
                    (torch.zeros(1, 4), torch.zeros(1), torch.zeros(1))
                )
                continue
            if image_boxes.dim() == 1:
                # single prediction: restore the leading dimension
                image_boxes = image_boxes.unsqueeze(0)
                box_scores = box_scores.unsqueeze(0)
                box_labels = box_labels.unsqueeze(0)

            # filter boxes, classes, and scores by nms results
            nms_filter = batched_nms(image_boxes, box_scores, box_labels, iou_threshold)
            if nms_filter.size(0) > max_detections:
                # update nms_filter to keep the boxes with top max_detections scores
                box_scores_nms = box_scores[nms_filter]
                sorted_scores_nms_idx = torch.argsort(box_scores_nms, descending=True)
                nms_filter = nms_filter[sorted_scores_nms_idx[:max_detections]]
            detection_outputs.append(
                (
                    image_boxes[nms_filter],
                    box_labels[nms_filter],
                    box_scores[nms_filter],
                )
            )

        return detection_outputs
The provided code snippet includes necessary dependencies for implementing the `get_default_boxes_300` function. Write a Python function `def get_default_boxes_300(voc: bool = False) -> DefaultBoxes` to solve the following problem:
Convenience function for generating DefaultBoxes object for standard SSD300 model :param voc: set True if default boxes should be made for VOC dataset. Will set scales to be slightly larger than for the default COCO dataset configuration :return: DefaultBoxes object implemented for standard SSD300 models
Here is the function:
def get_default_boxes_300(voc: bool = False) -> DefaultBoxes:
    """
    Convenience function for generating DefaultBoxes object for standard SSD300 model

    :param voc: set True if default boxes should be made for VOC dataset.
        Will set scales to be slightly larger than for the default
        COCO dataset configuration
    :return: DefaultBoxes object implemented for standard SSD300 models
    """
    # scale choices follow:
    # https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    scales = (
        [[30, 60], [60, 111], [111, 162], [162, 213], [213, 264], [264, 315]]
        if voc
        else [[21, 45], [45, 99], [99, 153], [153, 207], [207, 261], [261, 315]]
    )
    return DefaultBoxes(
        image_size=300,
        feature_maps=[38, 19, 10, 5, 3, 1],
        steps=[8, 16, 32, 64, 100, 300],
        scales=scales,
        aspect_ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    )
21,382 | import itertools
import math
import random
from collections import defaultdict
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import numpy
import torch
from PIL import Image
from torch import Tensor
def _ltrb_to_xywh(boxes):
# in-place conversion from ltrb to cx,cy,w,h format
# expected input shape N,4
cx = 0.5 * (boxes[:, 0] + boxes[:, 2])
cy = 0.5 * (boxes[:, 1] + boxes[:, 3])
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
boxes[:, 0] = cx
boxes[:, 1] = cy
boxes[:, 2] = w
boxes[:, 3] = h | null |
21,383 | import itertools
import math
import random
from collections import defaultdict
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import numpy
import torch
from PIL import Image
from torch import Tensor
def _xywh_to_ltrb_batch(boxes):
# in-place conversion from cx, cy, w, h format to ltrb
# expected input shape M,N,4
lt_x = boxes[:, :, 0] - 0.5 * boxes[:, :, 2]
lt_y = boxes[:, :, 1] - 0.5 * boxes[:, :, 3]
rb_x = boxes[:, :, 0] + 0.5 * boxes[:, :, 2]
rb_y = boxes[:, :, 1] + 0.5 * boxes[:, :, 3]
boxes[:, :, 0] = lt_x
boxes[:, :, 1] = lt_y
boxes[:, :, 2] = rb_x
boxes[:, :, 3] = rb_y | null |
21,384 | import itertools
import math
import random
from collections import defaultdict
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import numpy
import torch
from PIL import Image
from torch import Tensor
# Crop strategies sampled uniformly by ssd_random_crop: None keeps the
# original image, -1 accepts any crop (every IoU exceeds it), and the
# floats are minimum per-box IoU thresholds a crop must satisfy.
_SSD_RANDOM_CROP_OPTIONS = (
    # return original
    None,
    # random crop (all IoUs are valid)
    -1,
    # crop with minimum box IoU
    0.1,
    0.3,
    0.5,
    0.7,
    0.9,
)
The provided code snippet includes necessary dependencies for implementing the `ssd_random_crop` function. Write a Python function `def ssd_random_crop( image: Image.Image, boxes: Tensor, labels: Tensor ) -> Tuple[Image.Image, Tensor, Tensor]` to solve the following problem:
Performs one of the random SSD crops on a given image, bounding boxes, and labels as implemented in the original paper. | Chooses between following 3 conditions: | 1. Preserve the original image | 2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9 | 3. Random crop Adapted from: https://github.com/chauhan-utk/ssd.DomainAdaptation :param image: the image to potentially crop :param boxes: a tensor of bounding boxes in ltrb format with shape n_boxes,4 :param labels: a tensor of labels for each of the bounding boxes :return: the cropped image, boxes, and labels
Here is the function:
def ssd_random_crop(
    image: Image.Image, boxes: Tensor, labels: Tensor
) -> Tuple[Image.Image, Tensor, Tensor]:
    """
    Performs one of the random SSD crops on a given image, bounding boxes,
    and labels as implemented in the original paper.

    | Chooses between following 3 conditions:
    | 1. Preserve the original image
    | 2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9
    | 3. Random crop

    Adapted from: https://github.com/chauhan-utk/ssd.DomainAdaptation

    :param image: the image to potentially crop
    :param boxes: a tensor of bounding boxes in ltrb format with shape n_boxes,4.
        NOTE(review): coordinates appear to be normalized to [0, 1] — the
        crop rectangle is sampled in that range; confirm with callers. The
        tensor is clipped in place before a masked copy is returned.
    :param labels: a tensor of labels for each of the bounding boxes
    :return: the cropped image, boxes, and labels
    """
    # box_iou is expected from torchvision.ops; None signals the optional
    # import failed elsewhere in this module
    if box_iou is None:
        raise RuntimeError(
            "Unable to import box_iou from torchvision.ops try upgrading your"
            " torch and torchvision versions"
        )
    # Loop will always return something when None or 0 is selected
    while True:
        min_iou = random.choice(_SSD_RANDOM_CROP_OPTIONS)

        # do nothing
        if min_iou is None:
            return image, boxes, labels

        w_orig, h_orig = image.size
        # search for 50 random crops before trying a different threshold
        for _ in range(50):
            # crops to [.1,1.0] of image area since 0.3 * 0.3 ~= 0.1
            w_crop = random.uniform(0.3, 1.0)
            h_crop = random.uniform(0.3, 1.0)

            if w_crop / h_crop < 0.5 or w_crop / h_crop > 2:
                continue  # keep crop ratio between 1:2 / 2:1

            # generate bounding box of size w_crop,h_crop
            left = random.uniform(0, 1.0 - w_crop)
            top = random.uniform(0, 1.0 - h_crop)
            right = left + w_crop
            bottom = top + h_crop

            # get IoUs between given bounding boxes and cropped box
            ious = box_iou(boxes, torch.tensor([[left, top, right, bottom]]))
            if not (ious > min_iou).all():
                continue  # do not use this crop if all boxes do not pass threshold

            # discard any boxes whose center is not in the cropped image
            x_centers = 0.5 * (boxes[:, 0] + boxes[:, 2])
            y_centers = 0.5 * (boxes[:, 1] + boxes[:, 3])
            center_in_crop_mask = (
                (x_centers > left)
                & (x_centers < right)
                & (y_centers > top)
                & (y_centers < bottom)
            )

            if not center_in_crop_mask.any():
                continue  # do not use crop if no boxes are centered in it

            # clip bounding boxes to the cropped boundaries
            # (mutates the caller's tensor in place)
            boxes[boxes[:, 0] < left, 0] = left
            boxes[boxes[:, 1] < top, 1] = top
            boxes[boxes[:, 2] > right, 2] = right
            boxes[boxes[:, 3] > bottom, 3] = bottom

            # drop bounding boxes whose centers are not in the cropped region
            boxes = boxes[center_in_crop_mask, :]
            labels = labels[center_in_crop_mask]

            # expand the cropped region to map to pixels in the image and crop
            image_crop_box = (
                int(left * w_orig),
                int(top * h_orig),
                int(right * w_orig),
                int(bottom * h_orig),
            )
            image = image.crop(image_crop_box)

            # shift and crop bounding boxes, re-normalizing to the new crop
            boxes[:, 0] = (boxes[:, 0] - left) / w_crop
            boxes[:, 1] = (boxes[:, 1] - top) / h_crop
            boxes[:, 2] = (boxes[:, 2] - left) / w_crop
            boxes[:, 3] = (boxes[:, 3] - top) / h_crop

            return image, boxes, labels
21,385 | import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from contextlib import ExitStack
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.utils.hooks import RemovableHandle
from tqdm import auto
from sparseml.pytorch.utils.helpers import (
get_optim_learning_rate,
tensors_batch_size,
tensors_module_forward,
tensors_to_device,
)
from sparseml.pytorch.utils.logger import BaseLogger
from sparseml.pytorch.utils.loss import DEFAULT_LOSS_KEY, LossWrapper
DEFAULT_LOSS_KEY = "__loss__"
The provided code snippet includes necessary dependencies for implementing the `def_model_backward` function. Write a Python function `def def_model_backward( losses: Dict[str, Tensor], model: Module, scaler: GradScaler = None )` to solve the following problem:
Default function to perform a backwards pass for a model and the calculated losses Calls backwards for the DEFAULT_LOSS_KEY in losses Dict :param model: the model to run the backward for :param losses: the losses dictionary containing named tensors, DEFAULT_LOSS_KEY is expected to exist and backwards is called on that :param scaler: GradScaler object for running in mixed precision with amp. If scaler is not None will call scaler.scale on the loss object. Default is None
Here is the function:
def def_model_backward(
    losses: Dict[str, Tensor], model: Module, scaler: GradScaler = None
):
    """
    Default backward pass for a model and its computed losses.

    Runs backward on the tensor stored under DEFAULT_LOSS_KEY in ``losses``.

    :param losses: dictionary of named loss tensors; DEFAULT_LOSS_KEY must be
        present and is the tensor backpropagated
    :param model: the model the backward pass is run for (unused here, kept
        for the callback signature)
    :param scaler: GradScaler for mixed-precision (amp) training; when given,
        the loss is scaled before backward. Default is None
    """
    total_loss = losses[DEFAULT_LOSS_KEY]
    # under amp, scale the loss so fp16 gradients stay representable
    if scaler is not None:
        total_loss = scaler.scale(total_loss)
    total_loss.backward()
21,386 | from typing import Optional
from sparseml.core.utils import session_context_manager
from sparseml.evaluation.registry import SparseMLEvaluationRegistry
from sparsezoo.evaluation.results import Result
class SparseMLEvaluationRegistry(EvaluationRegistry):
    """
    This class is used to register and retrieve evaluation integrations for
    SparseML. It is a subclass of the SparseZoo EvaluationRegistry class.
    """

    # NOTE(review): takes `cls` as first parameter — presumably decorated
    # with @classmethod in the full source; the decorator is not visible
    # in this excerpt. Confirm against the original file.
    def resolve(cls, name: str, *args, **kwargs) -> Callable[..., Result]:
        """
        Resolve an evaluation integration by name.

        :param name: The name of the evaluation integration to resolve
        :param args: The arguments to pass to the evaluation integration
        :param kwargs: The keyword arguments to pass to the evaluation integration
        :return: The evaluation integration associated with the name
        """
        # importing the integration module registers it as a side effect
        collect_integrations(
            name=name,
            integration_config_path=kwargs.get(
                "integration_config_path", INTEGRATION_CONFIG_PATH
            ),
        )
        return cls.get_value_from_registry(name=name)
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate( model_path: str, integration: str, datasets: Optional[str] = None, batch_size: int = 1, **kwargs, ) -> Result` to solve the following problem:
Evaluate a target model on a dataset using the specified integration. :param model_path: Path to the model folder or the model stub from SparseZoo/HuggingFace to evaluate. For example, `mgoin/llama2.c-stories15M-quant-pt` :param datasets: The dataset(s) to evaluate on. For example, `open_platypus`. If None, it is left up to the integration to handle the default dataset. :param integration: Name of the eval integration to use. Example, `perplexity` :param batch_size: The batch size to use for evals, defaults to 1 :return: The evaluation result as a Result object
Here is the function:
def evaluate(
    model_path: str,
    integration: str,
    datasets: Optional[str] = None,
    batch_size: int = 1,
    **kwargs,
) -> Result:
    """
    Evaluate a target model on a dataset using the specified integration.

    :param model_path: Path to the model folder or the model stub from
        SparseZoo/HuggingFace to evaluate. For example,
        `mgoin/llama2.c-stories15M-quant-pt`
    :param integration: Name of the eval integration to use.
        Example, `perplexity`
    :param datasets: The dataset(s) to evaluate on. For example,
        `open_platypus`. If None, it is left up to the integration
        to handle the default dataset.
    :param batch_size: The batch size to use for evals, defaults to 1
    :return: The evaluation result as a Result object
    """
    with session_context_manager():
        eval_integration = SparseMLEvaluationRegistry.resolve(
            name=integration, datasets=datasets
        )
        call_kwargs = dict(model_path=model_path, batch_size=batch_size, **kwargs)
        # omit `datasets` entirely when None so the integration picks
        # its own default dataset
        if datasets is not None:
            call_kwargs["datasets"] = datasets
        return eval_integration(**call_kwargs)
21,387 | import logging
from typing import Any, Dict, List, Union
from sparseml.evaluation.registry import SparseMLEvaluationRegistry
from sparsezoo.evaluation.results import Dataset, Evaluation, Metric, Result
_LOGGER = logging.getLogger(__name__)
class SparseMLLM(HFLM):
    """
    SparseML is an open-source model optimization toolkit that enables you to create
    inference-optimized sparse models using pruning, quantization, and distillation
    algorithms. Models optimized with SparseML can then be exported to the ONNX and
    deployed with DeepSparse for GPU-class performance on CPU hardware.

    This class is a wrapper around the HuggingFace LM class to enable SparseML
    integration with the lm-evaluation-harness
    """

    # HFLM hook override: build the underlying model through SparseML so
    # sparse/quantized checkpoints and recipes load correctly
    def _create_model(
        self,
        pretrained: str,
        **kwargs,
    ) -> None:
        model_kwargs = kwargs if kwargs else {}
        # forward only the kwargs that from_pretrained understands
        relevant_kwarg_names = [
            "revision",
            "trust_remote_code",
            "offload_folder",
            "device",
        ]
        relevant_kwargs = {
            k: v for k, v in model_kwargs.items() if k in relevant_kwarg_names
        }
        model = SparseAutoModelForCausalLM.from_pretrained(
            pretrained, **relevant_kwargs
        )
        self._model = model

    # HFLM hook override: load the model config through SparseAutoConfig
    def _get_config(self, pretrained: str, **kwargs) -> None:
        self._config = SparseAutoConfig.from_pretrained(
            pretrained_model_name_or_path=pretrained, **kwargs
        )
def _format_lm_eval_raw_results(results: Dict[str, Any]) -> List[Evaluation]:
    """
    Convert raw lm-evaluation-harness output into Evaluation objects.

    :param results: the raw results dict produced by lm-evaluation-harness
    :return: one Evaluation per evaluated dataset
    """
    shared_config = results["config"]
    formatted = []
    for dataset_name, dataset_result in results["results"].items():
        # keep only numeric entries; harness results may also carry strings
        numeric_metrics = [
            Metric(name=key, value=value)
            for key, value in dataset_result.items()
            if isinstance(value, (float, int))
        ]
        formatted.append(
            Evaluation(
                task=LM_EVALUATION_HARNESS,
                dataset=Dataset(
                    type=None, name=dataset_name, config=shared_config, split=None
                ),
                metrics=numeric_metrics,
                samples=None,
            )
        )
    return formatted
class SparseAutoTokenizer(AutoTokenizer):
    """
    SparseML wrapper for the AutoTokenizer class
    """

    # NOTE(review): takes `cls` first — presumably @classmethod in the full
    # source; the decorator is not visible in this excerpt
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        A wrapper around the AutoTokenizer.from_pretrained method that
        enables the loading of tokenizer from SparseZoo stubs

        If a SparseZoo stub is passed, all the available tokenizer
        files are downloaded and the path to the directory containing the
        files is passed to the AutoTokenizer.from_pretrained method

        :param pretrained_model_name_or_path: the name of or path to the model to load
        :return tokenizer: the loaded tokenizer from pretrained
        """
        if str(pretrained_model_name_or_path).startswith("zoo:"):
            with main_process_first_context():
                model = Model(pretrained_model_name_or_path)
                for file_name in POSSIBLE_TOKENIZER_FILES:
                    # go over all the possible tokenizer files
                    # and if detected, download them
                    file = model.deployment.get_file(file_name)
                    if file is not None:
                        tokenizer_file = file
                        tokenizer_file.download()
                # point transformers at the directory holding the downloads
                # (uses the last tokenizer file found in the loop above)
                pretrained_model_name_or_path = os.path.dirname(tokenizer_file.path)
        return super().from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `lm_eval_harness` function. Write a Python function `def lm_eval_harness( model_path, datasets: Union[str, List[str]] = "wikitext", batch_size: int = 1, **kwargs, ) -> Result` to solve the following problem:
Run the lm-evaluation-harness on the given target model :param model_path: the target model to evaluate, can be path to a local model directory or a SparseZoo/Huggingface stub :param datasets: the datasets to evaluate on, can be a string or list of strings, or a comma-separated string :param batch_size: the batch size to use for evaluation :param kwargs: additional keyword arguments to pass to the lm-evaluation-harness. For example, `limit`
Here is the function:
def lm_eval_harness(
    model_path,
    datasets: Union[str, List[str]] = "wikitext",
    batch_size: int = 1,
    **kwargs,
) -> Result:
    """
    Run the lm-evaluation-harness on the given target model.

    :param model_path: the target model to evaluate, can be path to
        a local model directory or a SparseZoo/Huggingface stub
    :param datasets: the datasets to evaluate on, can be a string, a list of
        strings, or a comma-separated string
    :param batch_size: the batch size to use for evaluation
    :param kwargs: additional keyword arguments to pass to the
        lm-evaluation-harness. For example, `limit`
    :return: the evaluation results as a Result object
    """
    # normalize limit to an int (or None when absent/falsy)
    raw_limit = kwargs.get("limit")
    kwargs["limit"] = int(raw_limit) if raw_limit else None

    tokenizer = SparseAutoTokenizer.from_pretrained(model_path)
    model = SparseMLLM(pretrained=model_path, tokenizer=tokenizer, **kwargs)

    if kwargs.get("limit"):
        _LOGGER.warning(
            "WARNING: --limit SHOULD ONLY BE USED FOR TESTING. "
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    tasks.initialize_tasks()
    if datasets is None:
        task_names = tasks.ALL_TASKS
    else:
        dataset_csv = datasets if isinstance(datasets, str) else ",".join(datasets)
        task_names = utils.pattern_match(dataset_csv.split(","), tasks.ALL_TASKS)

    _LOGGER.info(f"Selected Tasks: {task_names}")

    raw = evaluator.simple_evaluate(
        model=model,
        tasks=task_names,
        batch_size=batch_size,
        **kwargs,
    )
    return Result(raw=raw, formatted=_format_lm_eval_raw_results(raw))
21,388 | from typing import List, Optional, Union
from sparseml.transformers.utils.sparse_model import SparseAutoModelForCausalLM
from sparseml.transformers.utils.sparse_tokenizer import SparseAutoTokenizer
try:
import numpy
import torch
from datasets import Dataset as HuggingFaceDataset
from datasets import load_dataset
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
except ImportError as err:
raise ImportError(
"perplexity evaluation requires the following packages to be installed: "
"datasets, numpy, torch, tqdm, transformers kindly install these packages "
"using `pip install sparseml[transformers, torch]`"
) from err
from sparseml.evaluation.registry import SparseMLEvaluationRegistry
from sparsezoo.evaluation.results import Dataset, Evaluation, Metric, Result
def _infer_dataset_config_name(datasets: str):
"""
:param datasets: The name of the dataset to load
:return: The name of the dataset config to load
"""
if datasets == "wikitext":
return "wikitext-2-raw-v1"
return None
def _load_perplexity_dataset(
    dataset_name: str,
    dataset_config_name: str,
    text_column_name: Union[str, List[str], None] = None,
    split: Optional[str] = None,
    limit: Optional[int] = None,
) -> List[str]:
    """
    Loads the dataset for perplexity evaluation.

    :param dataset_name: The name of the dataset to load
    :param dataset_config_name: The name of the dataset config to load
    :param text_column_name: The name of the column containing the text data
        if None, defaults to "text". If a list of column names is passed, the
        columns will be concatenated to form the input text
    :param split: The split of the dataset to load, if None uses test split
        if available, otherwise uses train split
    :param limit: The maximum number of non-empty samples to load from the
        dataset; if None, all non-empty samples are loaded
    :return: The loaded dataset as a list of strings
    """
    dataset: HuggingFaceDataset = _fetch_dataset_split(
        dataset_name=dataset_name,
        dataset_config_name=dataset_config_name,
        split=split,
    )
    # normalize to a validated list of column names
    text_column_name: List[str] = _verify_text_column_name(
        dataset=dataset, text_column_name=text_column_name
    )

    inputs = []
    for sample in dataset:
        # concatenate the configured text columns into one input string
        input_sample = "".join(sample[column_name] for column_name in text_column_name)
        # skip empty rows so they do not skew perplexity
        if input_sample != "":
            inputs.append(input_sample)
        if limit is not None and len(inputs) >= limit:
            break
    return inputs
class SparseAutoModelForCausalLM(AutoModelForCausalLM):
    """
    SparseML wrapper for the AutoModelForCausalLM class
    """

    # NOTE(review): takes `cls` first — presumably @classmethod in the full
    # source; the decorator is not visible in this excerpt
    def from_pretrained(
        cls,
        pretrained_model_name_or_path,
        recipe: Optional[Union[str, Path]] = None,
        *model_args,
        **kwargs,
    ) -> Module:
        """
        A wrapper around the AutoModelForCausalLM.from_pretrained method that
        enables the loading of a SparseML recipe file to apply to the model

        :param pretrained_model_name_or_path: the name of or path to the model to load
        :param recipe: the path to the recipe file to apply to the model. Can be a
            string or Path object. If None, a recipe will be searched for in the
            pretrained_model_name_or_path directory and applied if found
        :return the created model for causal language modeling
        """

        def skip(*args, **kwargs):
            # no-op stand-in for torch weight initializers
            pass

        # Skip the initializer step. This accelerates the loading
        # of the models, especially for the quantized models
        torch.nn.init.kaiming_uniform_ = skip
        torch.nn.init.uniform_ = skip
        torch.nn.init.normal_ = skip

        pretrained_model_name_or_path = (
            pretrained_model_name_or_path.as_posix()
            if isinstance(pretrained_model_name_or_path, Path)
            else pretrained_model_name_or_path
        )

        if pretrained_model_name_or_path.startswith("zoo:"):
            _LOGGER.debug(
                "Passed zoo stub to SparseAutoModelForCausalLM object. "
                "Loading model from SparseZoo training files..."
            )
            with main_process_first_context():
                pretrained_model_name_or_path = download_zoo_training_dir(
                    zoo_stub=pretrained_model_name_or_path
                )

        # temporarily set the log level to error, to ignore printing out long missing
        # and unexpected key error messages (these are EXPECTED for quantized models)
        logger = logging.getLogger("transformers.modeling_utils")
        restore_log_level = logger.getEffectiveLevel()
        logger.setLevel(level=logging.ERROR)
        model = super(AutoModelForCausalLM, cls).from_pretrained(
            pretrained_model_name_or_path, *model_args, **kwargs
        )
        logger.setLevel(level=restore_log_level)
        # fall back to a recipe found next to the checkpoint when none given
        recipe = resolve_recipe(recipe, pretrained_model_name_or_path)

        if recipe:
            apply_recipe_structure_to_model(
                model=model,
                model_path=pretrained_model_name_or_path,
                recipe_path=recipe,
            )
        return model
class SparseAutoTokenizer(AutoTokenizer):
    """
    SparseML wrapper for the AutoTokenizer class
    """

    # NOTE(review): takes `cls` first — presumably @classmethod in the full
    # source; the decorator is not visible in this excerpt
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        A wrapper around the AutoTokenizer.from_pretrained method that
        enables the loading of tokenizer from SparseZoo stubs

        If a SparseZoo stub is passed, all the available tokenizer
        files are downloaded and the path to the directory containing the
        files is passed to the AutoTokenizer.from_pretrained method

        :param pretrained_model_name_or_path: the name of or path to the model to load
        :return tokenizer: the loaded tokenizer from pretrained
        """
        if str(pretrained_model_name_or_path).startswith("zoo:"):
            with main_process_first_context():
                model = Model(pretrained_model_name_or_path)
                for file_name in POSSIBLE_TOKENIZER_FILES:
                    # go over all the possible tokenizer files
                    # and if detected, download them
                    file = model.deployment.get_file(file_name)
                    if file is not None:
                        tokenizer_file = file
                        tokenizer_file.download()
                # point transformers at the directory holding the downloads
                # (uses the last tokenizer file found in the loop above)
                pretrained_model_name_or_path = os.path.dirname(tokenizer_file.path)
        return super().from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `perplexity_eval` function. Write a Python function `def perplexity_eval( model_path, datasets: str = "wikitext", batch_size: int = 1, device: Optional[str] = None, limit: Optional[int] = None, **kwargs, ) -> Result` to solve the following problem:
Perform perplexity evaluation on a language model. :param model_path: The path to the model to evaluate :param datasets: The name of the dataset to evaluate on :param batch_size: The batch size to use for evaluation :param device: The device to use for evaluation :param limit: The number of samples to evaluate on :param kwargs: Additional arguments for the evaluation
Here is the function:
def perplexity_eval(
    model_path,
    datasets: str = "wikitext",
    batch_size: int = 1,
    device: Optional[str] = None,
    limit: Optional[int] = None,
    **kwargs,
) -> Result:
    """
    Perform perplexity evaluation on a language model.

    :param model_path: The path to the model to evaluate
    :param datasets: The name of the dataset to evaluate on
    :param batch_size: The batch size to use for evaluation
    :param device: The device to use for evaluation
    :param limit: The number of samples to evaluate on
    :param kwargs: Additional arguments for the evaluation
    :return: a Result holding the mean perplexity over the dataset
    :raises NotImplementedError: if a list of datasets is passed
    """
    if isinstance(datasets, list):
        raise NotImplementedError(
            "Running perplexity evaluation on multiple datasets is not supported"
        )
    dataset_config_name = _infer_dataset_config_name(datasets)
    task = "text-generation"
    split = kwargs.pop("split", None)
    model = SparseAutoModelForCausalLM.from_pretrained(model_path)
    tokenizer = SparseAutoTokenizer.from_pretrained(model_path)

    input_text = _load_perplexity_dataset(
        dataset_name=datasets,
        dataset_config_name=dataset_config_name,
        split=split,
        limit=limit,
        text_column_name=kwargs.pop("text_column_name", None),
    )
    add_start_token = True
    max_length = None

    # Adapted from
    # https://github.com/huggingface/evaluate/blob/main/metrics/perplexity/perplexity.py#L103
    # if batch_size > 1 (which generally leads to padding being required), and
    # if there is not an already assigned pad_token, assign an existing
    # special token to also be the padding token
    if tokenizer.pad_token is None and batch_size > 1:
        existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
        # check that the model already has at least one special token defined
        # FIX: message was previously split across statements, silently
        # truncating the assertion text — now one parenthesized argument
        assert len(existing_special_tokens) > 0, (
            "If batch_size > 1, model must have at least one special token to "
            "use for padding. Please use a different model or set batch_size=1."
        )
        # assign one of the special tokens to also be the pad token
        tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

    if add_start_token and max_length:
        # leave room for <BOS> token to be added:
        assert tokenizer.bos_token is not None, (
            "Input model must already have a BOS token if using "
            "add_start_token=True. Please use a different model, or set "
            "add_start_token=False"
        )
        max_tokenized_len = max_length - 1
    else:
        max_tokenized_len = max_length

    # if batch_size is 1, set the pad token to be the eos token
    if batch_size == 1:
        tokenizer.pad_token = tokenizer.eos_token

    # fetch tokenized inputs and attention masks
    encodings = tokenizer(
        input_text,
        add_special_tokens=False,
        padding=True,
        truncation=True if max_tokenized_len else False,
        max_length=max_tokenized_len,
        return_tensors="pt",
        return_attention_mask=True,
    ).to(device)

    encoded_texts = encodings["input_ids"]
    attn_masks = encodings["attention_mask"]

    # check that each input is long enough:
    if add_start_token:
        assert torch.all(
            torch.ge(attn_masks.sum(1), 1)
        ), "Each input text must be at least one token long."
    else:
        assert torch.all(torch.ge(attn_masks.sum(1), 2)), (
            "When add_start_token=False, each input text must be at least two "
            "tokens long. Run with add_start_token=True if inputting strings of "
            "only one token, and remove all empty input strings."
        )

    ppls = []
    loss_fct = CrossEntropyLoss(reduction="none")

    for start_index in tqdm(range(0, len(encoded_texts), batch_size)):
        end_index = min(start_index + batch_size, len(encoded_texts))
        encoded_batch = encoded_texts[start_index:end_index]
        attn_mask = attn_masks[start_index:end_index]

        if add_start_token:
            bos_tokens_tensor = torch.tensor(
                [[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)
            ).to(device)
            # prepend <BOS> token tensor to each input encoding and
            # attention mask
            encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
            attn_mask = torch.cat(
                [
                    torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device),
                    attn_mask,
                ],
                dim=1,
            )

        labels = encoded_batch

        with torch.no_grad():
            out_logits = model(encoded_batch, attention_mask=attn_mask).logits

        # shift so position t predicts token t+1, masking padding positions
        shift_logits = out_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

        # calculate perplexity for each batch
        perplexity_batch = torch.exp(
            (
                loss_fct(shift_logits.transpose(1, 2), shift_labels)
                * shift_attention_mask_batch
            ).sum(1)
            / shift_attention_mask_batch.sum(1)
        )

        ppls += perplexity_batch.tolist()

    mean_ppl = numpy.mean(ppls)
    raw = {"mean_perplexity": mean_ppl}

    # wrap the perplexity result in a Result object
    # (renamed from `eval` to avoid shadowing the builtin)
    evaluation = Evaluation(
        task=task,
        dataset=Dataset(
            type=task,
            name=datasets,
            config=dataset_config_name,
            split=split,
        ),
        metrics=[Metric(name="perplexity", value=mean_ppl)],
        samples=None,
    )
    return Result(formatted=[evaluation], raw=raw)
21,389 | import importlib
import logging
from contextlib import suppress
from pathlib import Path
from typing import Callable, Dict
import yaml
from sparsezoo.evaluation import EvaluationRegistry
from sparsezoo.evaluation.results import Result
from sparsezoo.utils.registry import standardize_lookup_name
_LOGGER = logging.getLogger(__name__)
INTEGRATION_CONFIG_PATH: Path = Path(__file__).parent / INTEGRATION_CONFIG_NAME
class SparseMLEvaluationRegistry(EvaluationRegistry):
    """
    This class is used to register and retrieve evaluation integrations for
    SparseML. It is a subclass of the SparseZoo EvaluationRegistry class.
    """

    # NOTE(review): takes `cls` as first parameter — presumably decorated
    # with @classmethod in the full source; the decorator is not visible
    # in this excerpt. Confirm against the original file.
    def resolve(cls, name: str, *args, **kwargs) -> Callable[..., Result]:
        """
        Resolve an evaluation integration by name.

        :param name: The name of the evaluation integration to resolve
        :param args: The arguments to pass to the evaluation integration
        :param kwargs: The keyword arguments to pass to the evaluation integration
        :return: The evaluation integration associated with the name
        """
        # importing the integration module registers it as a side effect
        collect_integrations(
            name=name,
            integration_config_path=kwargs.get(
                "integration_config_path", INTEGRATION_CONFIG_PATH
            ),
        )
        return cls.get_value_from_registry(name=name)
def _load_yaml(path: Path):
    """
    Parse and return the YAML document stored at ``path``.

    :param path: The path to the yaml file
    :return: The loaded yaml file
    """
    with path.open("r") as yaml_file:
        return yaml.safe_load(yaml_file)
def _standardize_integration_dict(
    integrations_location_dict: Dict[str, str],
    integration_config_path: Path = INTEGRATION_CONFIG_PATH,
) -> Dict[str, str]:
    """
    Normalize integration names and resolve their locations to absolute paths.

    :param integrations_location_dict: Dictionary of integration names to
        their locations
    :param integration_config_path: The path to the integrations config file
        (Used to resolve relative paths to absolute paths)
    :return: A copy of the dictionary with the standardized integration names
        and the resolved absolute paths to their locations
    """
    standardized = {}
    for raw_name, location in integrations_location_dict.items():
        resolved_location = _resolve_relative_path(
            relative_path=location, relative_to=integration_config_path
        )
        standardized[standardize_lookup_name(raw_name)] = resolved_location
    return standardized
The provided code snippet includes necessary dependencies for implementing the `collect_integrations` function. Write a Python function `def collect_integrations( name: str, integration_config_path: Path = INTEGRATION_CONFIG_PATH )` to solve the following problem:
This function is used to collect integrations based on name; this method is responsible for triggering the registration with the SparseML Evaluation registry. This is done by importing the module associated with each integration. This function is called automatically when the registry is accessed. The specific integration(s) `callable` must be decorated with the `@SparseMLEvaluationRegistry.register` decorator. :param name: The name of the integration to collect, is case insensitive :param integration_config_path: The path to the integrations config file
Here is the function:
def collect_integrations(
    name: str, integration_config_path: Path = INTEGRATION_CONFIG_PATH
):
    """
    Collect (import) the integration registered under the given name so that
    it registers itself with the SparseML Evaluation registry.

    Registration happens as a side effect of importing the module associated
    with the integration; the integration's callable(s) must be decorated
    with ``@SparseMLEvaluationRegistry.register``. This function is called
    automatically when the registry is accessed.

    :param name: The name of the integration to collect, is case insensitive
    :param integration_config_path: The path to the integrations config file
        mapping integration names to the module files that implement them
    :raises ValueError: if no integration is configured under the given name
    :raises ImportError: if the integration's module fails to import
    """
    with suppress(KeyError):
        # exit early if the integration is already registered
        SparseMLEvaluationRegistry.get_value_from_registry(name)
        _LOGGER.info(f"Integration {name} already registered")
        return
    # reaching here means the lookup raised KeyError (not yet registered);
    # resolve the integration's module location from the config file
    integrations = _standardize_integration_dict(
        integrations_location_dict=_load_yaml(path=integration_config_path),
        integration_config_path=integration_config_path,
    )
    name = standardize_lookup_name(name)
    if name in integrations:
        location = integrations[name]
        _LOGGER.debug(f"Auto collecting {name} integration for eval from {location}")
        try:
            # importing the module executes the @register decorators inside it
            spec = importlib.util.spec_from_file_location(
                f"eval_plugin_{name}", location
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            _LOGGER.info(f"Auto collected {name} integration for eval")
        except ImportError as import_error:
            _LOGGER.warning(f"Collection of {name} integration for eval failed")
            raise import_error
    else:
        raise ValueError(f"No registered integrations found for the given name {name}")
21,390 | import functools
from typing import Optional
from sparseml.base import check_version
def check_deepsparse_install(
    min_version: Optional[str] = None,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Verify that the deepsparse package is installed and, when bounds are
    given, that its version lies within [min_version, max_version].

    With raise_on_error=True any problem (not installed, version out of
    bounds) is raised as an ImportError; with raise_on_error=False the
    outcome is reported through the boolean return value instead.

    :param min_version: inclusive lower bound on the deepsparse version,
        or None for no lower bound
    :type min_version: str
    :param max_version: inclusive upper bound on the deepsparse version,
        or None for no upper bound
    :type max_version: str
    :param raise_on_error: True to raise issues as ImportError, False to
        report them through the return value
    :type raise_on_error: bool
    :return: True when deepsparse is installed within the accepted bounds,
        False otherwise (False is only reachable when raise_on_error=False)
    :rtype: bool
    """
    # deepsparse_err holds any failure recorded by the guarded deepsparse
    # import at module load time; None means the import succeeded
    if deepsparse_err is not None:
        if not raise_on_error:
            return False
        raise deepsparse_err

    return check_version(
        "deepsparse",
        min_version,
        max_version,
        raise_on_error,
        alternate_package_names=["deepsparse-nightly", "deepsparse-ent"],
    )
The provided code snippet includes necessary dependencies for implementing the `require_deepsparse` function. Write a Python function `def require_deepsparse( min_version: Optional[str] = None, max_version: Optional[str] = None )` to solve the following problem:
Decorator function to require use of deepsparse. Will check that the deepsparse package is installed and within the bounding ranges of min_version and max_version if they are set before calling the wrapped function. See :func:`check_deepsparse_install` for more info. :param min_version: The minimum version for deepsparse that it must be greater than or equal to, if unset will require no minimum version :type min_version: str :param max_version: The maximum version for deepsparse that it must be less than or equal to, if unset will require no maximum version. :type max_version: str
Here is the function:
def require_deepsparse(
    min_version: Optional[str] = None, max_version: Optional[str] = None
):
    """
    Decorator factory that enforces a deepsparse installation before the
    wrapped function runs.

    Every call of the decorated function first validates that deepsparse is
    installed and, when bounds are given, that its version lies within
    [min_version, max_version]. See :func:`check_deepsparse_install`.

    :param min_version: inclusive lower bound on the deepsparse version,
        or None for no lower bound
    :type min_version: str
    :param max_version: inclusive upper bound on the deepsparse version,
        or None for no upper bound
    :type max_version: str
    """

    def _apply(func):
        @functools.wraps(func)
        def _checked_call(*args, **kwargs):
            # validate on every invocation, not at decoration/import time
            check_deepsparse_install(min_version, max_version)
            return func(*args, **kwargs)

        return _checked_call

    return _apply
21,391 | import logging
from sparseml.sparsification import SparsificationInfo
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `sparsification_info` function. Write a Python function `def sparsification_info() -> SparsificationInfo` to solve the following problem:
Load the available setup for sparsifying a model within deepsparse. :return: The sparsification info for the deepsparse framework :rtype: SparsificationInfo
Here is the function:
def sparsification_info() -> SparsificationInfo:
    """
    Gather the sparsification setup available for models within deepsparse.

    :return: The sparsification info for the deepsparse framework
    :rtype: SparsificationInfo
    """
    _LOGGER.debug("getting sparsification info for deepsparse")
    # no deepsparse-side modifiers are reported yet; return an empty set
    deepsparse_info = SparsificationInfo(modifiers=[])
    _LOGGER.info("retrieved sparsification info for deepsparse: %s", deepsparse_info)
    return deepsparse_info
21,392 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.deepsparse.base import check_deepsparse_install
from sparseml.deepsparse.sparsification import sparsification_info
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.sparsification import SparsificationInfo
from sparsezoo import File, Model
def detect_framework(item: Any) -> Framework:
    """
    Detect the supported ML framework for a given item specifically for the
    deepsparse package.

    Supported input types:
    - a Framework enum instance
    - a string of any case naming a framework
      (deepsparse, onnx, keras, pytorch, tensorflow_v1)
    - a string mentioning deepsparse or ending in a supported model file
      extension (.onnx)
    - a SparseZoo Model or File instance

    :param item: The item to detect the ML framework for
    :type item: Any
    :return: The detected framework, or Framework.unknown if undetermined
    :rtype: Framework
    """
    if isinstance(item, Framework):
        _LOGGER.debug("framework detected from Framework instance")
        return item
    if isinstance(item, str):
        normalized = item.lower().strip()
        if normalized in Framework.__members__:
            _LOGGER.debug("framework detected from Framework string instance")
            return Framework[normalized]
        if "deepsparse" in normalized or "deep sparse" in normalized:
            # text explicitly mentioning deepsparse
            _LOGGER.debug("framework detected from deepsparse text")
            return Framework.deepsparse
        if ".onnx" in normalized:
            # file url/path with an onnx extension is runnable by deepsparse
            _LOGGER.debug("framework detected from .onnx")
            return Framework.deepsparse
    if isinstance(item, (Model, File)):
        # deepsparse natively supports SparseZoo models/files
        _LOGGER.debug("framework detected from SparseZoo instance")
        return Framework.deepsparse
    return Framework.unknown
class Framework(Enum):
    """
    Framework types known of/supported within the sparseml/deepsparse ecosystem
    """
    unknown = "unknown"  # framework could not be determined from the input
    deepsparse = "deepsparse"  # Neural Magic DeepSparse inference engine
    onnx = "onnx"  # Open Neural Network Exchange model format
    keras = "keras"
    pytorch = "pytorch"
    tensorflow_v1 = "tensorflow_v1"  # TensorFlow 1.x graphs specifically
The provided code snippet includes necessary dependencies for implementing the `is_supported` function. Write a Python function `def is_supported(item: Any) -> bool` to solve the following problem:
:param item: The item to detect the support for :type item: Any :return: True if the item is supported by deepsparse, False otherwise :rtype: bool
Here is the function:
def is_supported(item: Any) -> bool:
    """
    Check whether the given item resolves to the deepsparse framework.

    :param item: The item to detect the support for
    :type item: Any
    :return: True if the item is supported by deepsparse, False otherwise
    :rtype: bool
    """
    return detect_framework(item) == Framework.deepsparse
21,393 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.deepsparse.base import check_deepsparse_install
from sparseml.deepsparse.sparsification import sparsification_info
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.sparsification import SparsificationInfo
from sparsezoo import File, Model
class Framework(Enum):
    """
    Framework types known of/supported within the sparseml/deepsparse ecosystem
    """
    unknown = "unknown"  # sentinel for an undetermined framework
    deepsparse = "deepsparse"
    onnx = "onnx"
    keras = "keras"
    pytorch = "pytorch"
    tensorflow_v1 = "tensorflow_v1"
def get_version(
    package_name: str,
    raise_on_error: bool,
    alternate_package_names: Optional[List[str]] = None,
) -> Optional[str]:
    """
    Look up the installed version of a package, optionally falling back to
    alternate distribution names (useful for nightly builds).

    :param package_name: The name of the full package, as it would be imported,
        to get the version for
    :type package_name: str
    :param raise_on_error: True to raise an ImportError if the package is not
        installed or couldn't be found, False to return None instead
    :type raise_on_error: bool
    :param alternate_package_names: List of alternate names to look for the
        package under if package_name is not found. The list is NOT mutated;
        alternates are tried from the end of the list, matching the previous
        pop() behavior.
    :type alternate_package_names: Optional[List[str]]
    :return: the version of the desired package if detected, otherwise None
        (or raises ImportError when raise_on_error is True)
    :rtype: str
    """
    current_version: Optional[str] = None
    version_err = None
    try:
        current_version = pkg_resources.get_distribution(package_name).version
    except Exception as err:
        version_err = err
    if version_err and alternate_package_names:
        # fix: operate on a copy so the caller's list is never mutated
        remaining = list(alternate_package_names)
        next_package = remaining.pop()
        return get_version(next_package, raise_on_error, remaining)
    if version_err and raise_on_error:
        raise ImportError(
            f"error while getting current version for {package_name}: {version_err}"
        )
    return current_version if not version_err else None
def check_deepsparse_install(
    min_version: Optional[str] = None,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the deepsparse package is installed.
    If raise_on_error, will raise an ImportError if it is not installed or
    the required version range, if set, is not installed.
    If not raise_on_error, will return True if installed with required version
    and False otherwise.
    :param min_version: The minimum version for deepsparse that it must be greater than
        or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for deepsparse that it must be less than
        or equal to, if unset will require no maximum version.
    :type max_version: str
    :param raise_on_error: True to raise any issues such as not installed,
        minimum version, or maximum version as ImportError. False to return the result.
    :type raise_on_error: bool
    :return: If raise_on_error, will return False if deepsparse is not installed
        or the version is outside the accepted bounds and True if everything is correct.
    :rtype: bool
    """
    # NOTE(review): deepsparse_err is presumably populated by this module's
    # guarded `import deepsparse` at load time (None on success) — confirm.
    if deepsparse_err is not None:
        if raise_on_error:
            raise deepsparse_err
        return False
    # nightly/enterprise distributions publish under alternate package names
    return check_version(
        "deepsparse",
        min_version,
        max_version,
        raise_on_error,
        alternate_package_names=["deepsparse-nightly", "deepsparse-ent"],
    )
The provided code snippet includes necessary dependencies for implementing the `framework_info` function. Write a Python function `def framework_info() -> FrameworkInfo` to solve the following problem:
Detect the information for the deepsparse framework such as package versions, availability for core actions such as training and inference, sparsification support, and inference provider support. :return: The framework info for deepsparse :rtype: FrameworkInfo
Here is the function:
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the deepsparse framework: installed package
    versions, availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for deepsparse
    :rtype: FrameworkInfo
    """
    deepsparse_installed = check_deepsparse_install(raise_on_error=False)

    arch = {}
    if deepsparse_installed:
        from deepsparse.cpu import cpu_architecture

        arch = cpu_architecture()

    cpu_warnings = []
    if arch:
        if arch.isa != "avx512":
            cpu_warnings.append(
                "AVX512 instruction set not detected, inference performance will be limited"
            )
        if arch.isa != "avx512" and arch.isa != "avx2":
            cpu_warnings.append(
                "AVX2 and AVX512 instruction sets not detected, "
                "inference performance will be severely limited"
            )
        if not arch.vnni:
            cpu_warnings.append(
                "VNNI instruction set not detected, "
                "quantized inference performance will be limited"
            )

    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description=(
            "Performant CPU provider within DeepSparse specializing in speedup of "
            "sparsified models using AVX and VNNI instruction sets"
        ),
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=deepsparse_installed,
        properties={
            "cpu_architecture": arch,
        },
        warnings=cpu_warnings,
    )

    # each package also publishes a "-nightly" alternate distribution
    package_versions = {
        package: get_version(
            package_name=package,
            raise_on_error=False,
            alternate_package_names=[f"{package}-nightly"],
        )
        for package in ("deepsparse", "sparsezoo", "sparseml")
    }

    return FrameworkInfo(
        framework=Framework.deepsparse,
        package_versions=package_versions,
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider],
        training_available=False,
        sparsification_available=False,
        exporting_onnx_available=False,
        inference_available=True,
    )
21,394 | import argparse
import logging
import os
from typing import Optional
import torch
import openpifpaf
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.utils import ModuleExporter
from sparsezoo.utils import validate_onnx
def image_size_warning(basenet_stride, input_w, input_h):
    """
    Warn when the input width/height are not congruent to 1 modulo the
    basenet stride, suggesting the two closest valid sizes.

    :param basenet_stride: stride of the base network
    :param input_w: requested input width in pixels
    :param input_h: requested input height in pixels
    """
    if input_w % basenet_stride != 1:
        # nearest valid width at or below the request; next one is a stride up
        lower_w = (input_w - 1) // basenet_stride * basenet_stride + 1
        LOG.warning(
            "input width (%d) should be a multiple of basenet "
            "stride (%d) + 1: closest are %d and %d",
            input_w,
            basenet_stride,
            lower_w,
            lower_w + basenet_stride,
        )
    if input_h % basenet_stride != 1:
        lower_h = (input_h - 1) // basenet_stride * basenet_stride + 1
        LOG.warning(
            "input height (%d) should be a multiple of basenet "
            "stride (%d) + 1: closest are %d and %d",
            input_h,
            basenet_stride,
            lower_h,
            lower_h + basenet_stride,
        )
class ScheduledModifierManager(BaseManager, Modifier):
    """
    The base modifier manager, handles managing multiple ScheduledModifiers.
    | Lifecycle:
    | - initialize
    | - initialize_loggers
    | - modify
    | - finalize
    :param modifiers: the modifiers to wrap
    """
    # NOTE(review): from_yaml takes no self/cls — presumably decorated as
    # @staticmethod in the full source; confirm before relying on instance calls.
    def from_yaml(
        file_path: Union[str, File],
        add_modifiers: Optional[List[Modifier]] = None,
        recipe_variables: Optional[Union[Dict[str, Any], str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ):
        """
        Convenience function used to create the manager of multiple modifiers from a
        recipe file.
        :param file_path: the path to the recipe file to load the modifier from, or
            a SparseZoo model stub to load a recipe for a model stored in SparseZoo.
            SparseZoo stubs should be preceded by 'zoo:', and can contain an optional
            '?recipe_type=<type>' parameter. Can also be a SparseZoo File
            object. i.e. '/path/to/local/recipe.md', 'zoo:model/stub/path',
            'zoo:model/stub/path?recipe_type=transfer'. Additionally, a raw
            yaml str is also supported in place of a file path.
        :param add_modifiers: additional modifiers that should be added to the
            returned manager alongside the ones loaded from the recipe file
        :param recipe_variables: additional arguments to override any root variables
            in the recipe with (i.e. num_epochs, init_lr)
        :param metadata: additional (to the information provided in the recipe) data
            to be preserved and utilized in the future - for reproducibility and
            completeness.
        :return: ScheduledModifierManager() created from the recipe file
        """
        recipe_variables = parse_recipe_variables(recipe_variables)
        yaml_str = load_recipe_yaml_str(file_path, **recipe_variables)
        modifiers = Modifier.load_list(yaml_str)
        if add_modifiers:
            modifiers.extend(add_modifiers)
        validated_metadata = validate_metadata(metadata, yaml_str)
        if metadata is not None:
            # stamp the torch version into the metadata for reproducibility
            validated_metadata = add_framework_metadata(
                validated_metadata, torch_version=torch.__version__
            )
        manager = ScheduledModifierManager(
            modifiers=modifiers, metadata=validated_metadata
        )
        return manager
    def __init__(
        self,
        modifiers: List[ScheduledModifier],
        metadata: Optional[Dict[str, Any]] = None,
    ):
        # fire-and-forget usage analytics event for manager construction
        sparseml_analytics.send_event("python__pytorch__manager__init")
        super().__init__(modifiers=modifiers, metadata=metadata)
        # epoch the manager was (or will be) initialized at; updated by
        # initialize/apply/apply_structure and used as the default in modify()
        self._initialize_epoch = 0
    def state_dict(self) -> Dict[str, Dict]:
        """
        :return: Dictionary to store any state variables for this manager.
            Includes all modifiers nested under this manager as sub keys in the dict.
            Only modifiers that a non empty state dict are included.
        """
        # NOTE(review): contrary to the docstring above, every modifier is
        # included regardless of whether its state dict is empty — confirm.
        def _modifiers_list_state_dict(modifiers):
            return {mod.identifier(): mod.state_dict() for mod in modifiers}
        if isinstance(self.modifiers, List):
            state_dict = _modifiers_list_state_dict(self.modifiers)
        else:
            # NOTE(review): iterating a dict directly yields keys only, not
            # (stage, modifiers) pairs — presumably self.modifiers.items()
            # was intended here; confirm against the full source.
            state_dict = {
                stage: _modifiers_list_state_dict(modifiers)
                for stage, modifiers in self.modifiers
            }
        return state_dict
    def load_state_dict(self, state_dict: Dict[str, Dict], strict: bool = True):
        """
        Loads the given state dict into this manager.
        All modifiers that match will be loaded.
        If any are missing or extra and strict=True, then will raise an IndexError
        :param state_dict: dictionary object as generated by this object's state_dict
            function
        :param strict: True to raise an IndexError for any missing or extra
            information in the state dict, False to ignore
        :raises IndexError: If any keys in the state dict do not correspond to a valid
            index for this manager and strict=True
        """
        if isinstance(self.modifiers, List):
            modifiers_index = {mod.identifier(): mod for mod in self.modifiers}
        else:
            if strict:
                modifiers_stages = set(self.modifiers.keys())
                state_dict_stages = set(state_dict.keys())
                diff = modifiers_stages.symmetric_difference(state_dict_stages)
                if diff:
                    raise IndexError(
                        f"Found extra stages: {state_dict_stages - modifiers_stages}"
                        f"and missing stages: {modifiers_stages - state_dict_stages}"
                    )
            # flatten all staged modifiers into a single identifier index
            modifiers_index = {}
            for stage_modifiers in self.modifiers.values():
                modifiers_index.update(
                    {mod.identifier(): mod for mod in stage_modifiers}
                )
        # NOTE(review): for the staged case, this second strict check and the
        # load loop below compare modifier identifiers against state_dict's
        # top-level keys, which the staged state_dict() emits as stage names —
        # looks inconsistent with the staged format; confirm.
        if strict:
            modifier_keys = set(modifiers_index.keys())
            state_dict_keys = set(state_dict.keys())
            diff = modifier_keys.symmetric_difference(state_dict_keys)
            if diff:
                raise IndexError(
                    f"Found extra keys: {state_dict_keys - modifier_keys} "
                    f"and missing keys: {modifier_keys - state_dict_keys}"
                )
        for key, val in state_dict.items():
            if key not in modifiers_index:
                continue
            modifiers_index[key].load_state_dict(val)
    def apply(
        self,
        module: Module,
        epoch: float = math.inf,
        loggers: Optional[LoggerManager] = None,
        finalize: bool = True,
        **kwargs,
    ):
        """
        Applies the lifecycle of each stage in the manager/recipe
        by calling into initialize and finalize for each modifier for each stage
        :param module: the PyTorch model/module to modify
        :param epoch: the epoch to apply the modifier at, defaults to math.inf (end)
        :param loggers: Optional logger manager to log the modification process to
        :param finalize: True to invoke finalize after initialize, False otherwise.
            If training after one shot, set finalize=False to keep modifiers applied.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers (passed to initialize and finalize).
        """
        if not self.initialized:
            super().initialize(module, epoch, loggers, **kwargs)
        self._initialize_epoch = epoch
        # normalize flat (list) and staged (dict) recipes into lists to walk
        modifier_lists = (
            self._modifiers
            if isinstance(self._modifiers, List)
            else list(self._modifiers.values())
        )
        for modifier_list in modifier_lists:
            self._initialize_modifiers(
                modifier_list, module, epoch, loggers=loggers, **kwargs
            )
            if finalize:
                self._finalize_modifiers(modifier_list, module, **kwargs)
    def apply_structure(
        self,
        module: Module,
        epoch: float = 0.0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        finalize: bool = False,
        **kwargs,
    ):
        """
        Initialize/apply the modifier for a given model/module at the given epoch
        if the modifier affects the structure of the module such as
        quantization, layer pruning, or filter pruning.
        Calls into initialize(module, epoch, loggers, **kwargs) if structured.
        :param module: the PyTorch model/module to modify
        :param epoch: the epoch to apply the modifier at, defaults to 0.0 (start)
        :param loggers: Optional logger manager to log the modification process to
        :param finalize: True to invoke finalize after initialize, False otherwise.
            Set finalize to True and epoch to math.inf for one shot application.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers (passed to initialize and finalize).
        """
        self._initialize_epoch = epoch
        for mod in self.iter_modifiers():
            mod.apply_structure(module, epoch, loggers, finalize, **kwargs)
    def initialize(
        self,
        module: Module,
        epoch: float = 0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        **kwargs,
    ):
        """
        Handles any initialization of the manager for the given model/module.
        epoch and steps_per_epoch can optionally be passed in to initialize the manager
        and module at a specific point in the training process.
        If loggers is not None, will additionally call initialize_loggers.
        :param module: the PyTorch model/module to modify
        :param epoch: The epoch to initialize the manager and module at.
            Defaults to 0 (start of the training process)
        :param loggers: Optional logger manager to log the modification process to
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers.
        """
        super().initialize(module, epoch, loggers, **kwargs)
        self._initialize_epoch = epoch
        self._initialize_modifiers(
            self.iter_modifiers(), module, epoch, loggers, **kwargs
        )
    def initialize_loggers(self, loggers: Union[None, LoggerManager, List[BaseLogger]]):
        """
        Handles initializing and setting up the loggers for the contained modifiers.
        :param loggers: the logger manager to setup this manager with for logging
            important info and milestones to
        """
        super().initialize_loggers(loggers)
        for mod in self.iter_modifiers():
            mod.initialize_loggers(self.loggers)
    def modify(
        self,
        module: Module,
        optimizer: Optimizer,
        steps_per_epoch: int,
        wrap_optim: Any = None,
        epoch: float = None,
        allow_parallel_module: bool = True,
        **kwargs,
    ) -> RecipeManagerStepWrapper:
        """
        Modify the given module and optimizer for training aware algorithms such as
        pruning and quantization.
        Initialize must be called first.
        After training is complete, finalize should be called.
        :param module: The model/module to modify
        :param optimizer: The optimizer to modify
        :param steps_per_epoch: The number of optimizer steps (batches) in each epoch
        :param wrap_optim: Optional object to wrap instead of the optimizer.
            Useful for cases like amp (fp16 training) where a it should be wrapped
            in place of the original optimizer since it doesn't always call into
            the optimizer.step() function.
        :param epoch: Optional epoch that can be passed in to start modifying at.
            Defaults to the epoch that was supplied to the initialize function.
        :param allow_parallel_module: if False, a DataParallel or
            DistributedDataParallel module passed to this function will be unwrapped
            to its base module during recipe initialization by referencing
            module.module. This is useful so a recipe may reference the base module
            parameters instead of the wrapped distributed ones. Set to True to not
            unwrap the distributed module. Default is True
        :param kwargs: Key word arguments that are passed to the intialize call
            if initilaize has not been called yet
        :return: A wrapped optimizer object. The wrapped object makes all the
            original properties for the wrapped object available so it can be
            used without any additional code changes.
        """
        if epoch is None:
            epoch = self._initialize_epoch
        if is_parallel_model(module) and not allow_parallel_module:
            # NOTE(review): allow_parallel_module is necessarily False inside
            # this branch, so the warning below is unreachable — the outer
            # condition or this inner check looks wrong; confirm intent.
            if allow_parallel_module:
                _LOGGER.warning(
                    "Parallel module detected by ScheduledModifierManager. Note that "
                    "the base module parameters will be prefixed by 'module.' which "
                    "may lead to matching issues if unaccounted for in recipe. Run "
                    "modify() with allow_parallel_module=False to unwrap the parallel "
                    "module during recipe initialization"
                )
            else:
                _LOGGER.info("Unwrapping parallel module for recipe initialization")
                module = module.module  # unwrap parallel module
        if not self.initialized:
            self.initialize(module, epoch, **kwargs)
        if wrap_optim is None:
            wrap_optim = optimizer
        return RecipeManagerStepWrapper(
            wrap_optim, optimizer, module, self, epoch, steps_per_epoch
        )
    def finalize(
        self, module: Optional[Module] = None, reset_loggers: bool = True, **kwargs
    ):
        """
        Handles any finalization of the modifier for the given model/module.
        Applies any remaining logic and cleans up any hooks or attachments to the model.
        :param module: The model/module to finalize the modifier for.
            Marked optional so state can still be cleaned up on delete,
            but generally should always be passed in.
        :param reset_loggers: True to remove any currently attached loggers (default),
            False to keep the loggers attached.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers.
        """
        super().finalize(module, reset_loggers, **kwargs)
        self._finalize_modifiers(self.iter_modifiers(), module, reset_loggers, **kwargs)
    def update(
        self,
        module: Module,
        optimizer: Optimizer,
        epoch: float,
        steps_per_epoch: int,
        log_updates: bool = True,
    ):
        """
        Handles updating the contained modifiers' states, module, or optimizer
        Only calls scheduled_update on the each modifier if modifier.update_ready()
        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        :param log_updates: True to log the updates for each modifier to the loggers,
            False to skip logging
        """
        super().update(module, optimizer, epoch, steps_per_epoch)
        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            if mod.update_ready(epoch, steps_per_epoch):
                mod.scheduled_update(module, optimizer, epoch, steps_per_epoch)
            if log_updates:
                mod.scheduled_log_update(module, optimizer, epoch, steps_per_epoch)
    def loss_update(
        self,
        loss: Tensor,
        module: Module,
        optimizer: Optimizer,
        epoch: float,
        steps_per_epoch: int,
        **kwargs,
    ) -> Tensor:
        """
        Optional call that can be made on the optimizer to update the contained
        modifiers once loss has been calculated
        :param loss: The calculated loss tensor
        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        :return: the modified loss tensor
        """
        super().loss_update(loss, module, optimizer, epoch, steps_per_epoch, **kwargs)
        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            # each enabled modifier may transform the loss in turn (chained)
            loss = mod.loss_update(
                loss,
                module,
                optimizer,
                epoch=epoch,
                steps_per_epoch=steps_per_epoch,
                **kwargs,
            )
        return loss
    def optimizer_pre_step(
        self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
    ):
        """
        Called before the optimizer step happens (after backward has been called,
        before optimizer.step)
        Calls into the contained modifiers
        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        """
        super().optimizer_pre_step(module, optimizer, epoch, steps_per_epoch)
        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            mod.optimizer_pre_step(module, optimizer, epoch, steps_per_epoch)
    def optimizer_post_step(
        self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
    ):
        """
        Called after the optimizer step happens and weights have updated
        Calls into the contained modifiers
        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        """
        super().optimizer_post_step(module, optimizer, epoch, steps_per_epoch)
        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            mod.optimizer_post_step(module, optimizer, epoch, steps_per_epoch)
    def _initialize_modifiers(
        self,
        modifiers: Iterable[Modifier],
        module: Module,
        epoch: float = 0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        **kwargs,
    ):
        # internal helper: initialize each modifier once, tolerating a single
        # Modifier being passed instead of an iterable
        if isinstance(modifiers, Modifier):
            modifiers = [modifiers]
        for mod in modifiers:
            if mod.initialized:
                # check in case modifier was initialized from apply_structure
                continue
            mod.initialize(module, epoch, loggers, **kwargs)
    def _finalize_modifiers(
        self,
        modifiers: Iterable[Modifier],
        module: Optional[Module] = None,
        reset_loggers: bool = True,
        **kwargs,
    ):
        # internal helper: finalize each modifier, mirroring _initialize_modifiers
        if isinstance(modifiers, Modifier):
            modifiers = [modifiers]
        for mod in modifiers:
            mod.finalize(module, reset_loggers, **kwargs)
def export(
    save_dir: str,
    name: str,
    model,
    datamodule,
    one_shot: Optional[str],
    input_width: int,
    input_height: int,
):
    """
    Export an openpifpaf model to ONNX and build a deployment folder.

    :param save_dir: directory to write the exported files into
    :param name: filename for the exported ONNX model
    :param model: the openpifpaf model to export
    :param datamodule: datamodule whose head metas provide the output names
    :param one_shot: optional SparseML recipe applied one-shot before export
    :param input_width: width of the dummy input used for export tracing
    :param input_height: height of the dummy input used for export tracing
    """
    if one_shot:
        # apply the recipe's sparsification structure in one shot before export
        ScheduledModifierManager.from_yaml(one_shot).apply(model)

    image_size_warning(model.base_net.stride, input_width, input_height)

    # configure the composite heads (disable in-place ops) for export
    openpifpaf.network.heads.CompositeField3.inplace_ops = False
    openpifpaf.network.heads.CompositeField4.inplace_ops = False

    sample_input = torch.randn(1, 3, input_height, input_width)
    head_names = [meta.name for meta in datamodule.head_metas]

    exporter = ModuleExporter(model, save_dir)
    exporter.export_onnx(
        sample_input,
        name=name,
        input_names=["input_batch"],
        output_names=head_names,
    )
    validate_onnx(os.path.join(save_dir, name))
    exporter.create_deployment_folder()
21,395 | import argparse
import copy
import logging
import os
import socket
from typing import Dict, Optional, Tuple
import torch
import openpifpaf
from openpifpaf import __version__
from openpifpaf.train import default_output_file
from sparseml.openpifpaf.trainer import SparseMLTrainer
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.utils.helpers import download_framework_model_by_recipe_type
from sparsezoo import Model
LOG = logging.getLogger(__name__)
class SparseMLTrainer(openpifpaf.network.Trainer):
    """
    OpenPifPaf trainer integrating SparseML recipe managers.

    NOTE(review): the method bodies below appear to have been stripped during
    snippet extraction; only the signatures remain. Docstrings added so the
    skeleton stays syntactically valid and readable.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        loss,
        optimizer,
        out,
        manager,
        checkpoint_manager,
        *,
        checkpoint_shell=None,
        lr_scheduler=None,
        device=None,
        model_meta_data=None,
    ):
        """Construct the trainer; body not shown in this snippet."""

    def loop(
        self,
        train_scenes: torch.utils.data.DataLoader,
        val_scenes: torch.utils.data.DataLoader,
        start_epoch=0,
    ):
        """Presumably runs the train/val epoch loop; body not shown in this snippet."""

    def train(self, scenes, epoch):
        """Presumably trains a single epoch; body not shown in this snippet."""

    def write_model(self, epoch, final=True):
        """Presumably writes a checkpoint; body not shown in this snippet."""
def cli():
    """
    Parse and post-process command line arguments for SparseML-aware
    OpenPifPaf training.

    Registers this script's own options plus the CLI options of every
    OpenPifPaf sub-component, resolves DDP/SLURM environment settings,
    selects the torch device, and applies the parsed configuration back
    onto the sub-components.

    :return: the fully configured argparse namespace
    """
    parser = argparse.ArgumentParser(
        prog="python3 -m openpifpaf.train",
        usage="%(prog)s [options]",
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--version",
        action="version",
        version="OpenPifPaf {version}".format(version=__version__),
    )
    parser.add_argument(
        "--recipe", default=None, required=True, help="Path to sparseml recipe"
    )
    parser.add_argument("-o", "--output", default=None, help="output file")
    parser.add_argument("--disable-cuda", action="store_true", help="disable CUDA")
    parser.add_argument(
        "--ddp",
        default=False,
        action="store_true",
        help="[experimental] DistributedDataParallel",
    )
    # torch.distributed.launch exports LOCAL_RANK; use it as the default
    default_local_rank = os.environ.get("LOCAL_RANK")
    if default_local_rank is not None:
        default_local_rank = int(default_local_rank)
    parser.add_argument(
        "--local_rank",
        default=default_local_rank,
        type=int,
        help="[experimental] for torch.distributed.launch",
    )
    parser.add_argument(
        "--no-sync-batchnorm",
        dest="sync_batchnorm",
        default=True,
        action="store_false",
        help="[experimental] in ddp, to not use syncbatchnorm",
    )
    # let each sub-component register its own CLI options
    openpifpaf.logger.cli(parser)
    openpifpaf.network.Factory.cli(parser)
    openpifpaf.network.losses.Factory.cli(parser)
    SparseMLTrainer.cli(parser)
    openpifpaf.encoder.cli(parser)
    openpifpaf.optimize.cli(parser)
    openpifpaf.datasets.cli(parser)
    openpifpaf.show.cli(parser)
    openpifpaf.visualizer.cli(parser)
    args = parser.parse_args()

    openpifpaf.logger.configure(args, LOG)
    if args.log_stats:
        logging.getLogger("openpifpaf.stats").setLevel(logging.DEBUG)

    # DDP with SLURM
    slurm_process_id = os.environ.get("SLURM_PROCID")
    if args.ddp and slurm_process_id is not None:
        if torch.cuda.device_count() > 1:
            LOG.warning(
                "Expected one GPU per SLURM task but found %d. "
                'Try with "srun --gpu-bind=closest ...". Still trying.',
                torch.cuda.device_count(),
            )
        # if there is more than one GPU available, assume that other SLURM tasks
        # have access to the same GPUs and assign GPUs uniquely by slurm_process_id
        args.local_rank = (
            int(slurm_process_id) % torch.cuda.device_count()
            if torch.cuda.device_count() > 0
            else 0
        )
        os.environ["RANK"] = slurm_process_id
        if not os.environ.get("WORLD_SIZE") and os.environ.get("SLURM_NTASKS"):
            os.environ["WORLD_SIZE"] = os.environ.get("SLURM_NTASKS")
        LOG.info("found SLURM process id: %s", slurm_process_id)
        LOG.info(
            "distributed env: master=%s port=%s rank=%s world=%s, "
            "local rank (GPU)=%d",
            os.environ.get("MASTER_ADDR"),
            os.environ.get("MASTER_PORT"),
            os.environ.get("RANK"),
            os.environ.get("WORLD_SIZE"),
            args.local_rank,
        )

    # add args.device
    args.device = torch.device("cpu")
    args.pin_memory = False
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device("cuda")
        args.pin_memory = True
    LOG.info(
        "neural network device: %s (CUDA available: %s, count: %d)",
        args.device,
        torch.cuda.is_available(),
        torch.cuda.device_count(),
    )

    # output
    if args.output is None:
        args.output = default_output_file(args)
        os.makedirs("outputs", exist_ok=True)

    # apply the parsed args back onto each sub-component
    openpifpaf.network.losses.Factory.configure(args)
    SparseMLTrainer.configure(args)
    openpifpaf.encoder.configure(args)
    openpifpaf.datasets.configure(args)
    openpifpaf.show.configure(args)
    openpifpaf.visualizer.configure(args)

    return args
21,396 | import argparse
import copy
import logging
import os
import socket
from typing import Dict, Optional, Tuple
import torch
import openpifpaf
from openpifpaf import __version__
from openpifpaf.train import default_output_file
from sparseml.openpifpaf.trainer import SparseMLTrainer
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.utils.helpers import download_framework_model_by_recipe_type
from sparsezoo import Model
LOG = logging.getLogger(__name__)
class ScheduledModifierManager(BaseManager, Modifier):
    """
    The base modifier manager, handles managing multiple ScheduledModifiers.

    | Lifecycle:
    |   - initialize
    |   - initialize_loggers
    |   - modify
    |   - finalize

    :param modifiers: the modifiers to wrap
    """

    # fix: restored @staticmethod -- the method takes no self/cls and every
    # call site uses ScheduledModifierManager.from_yaml(...)
    @staticmethod
    def from_yaml(
        file_path: Union[str, File],
        add_modifiers: Optional[List[Modifier]] = None,
        recipe_variables: Optional[Union[Dict[str, Any], str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ):
        """
        Convenience function used to create the manager of multiple modifiers from a
        recipe file.

        :param file_path: the path to the recipe file to load the modifier from, or
            a SparseZoo model stub to load a recipe for a model stored in SparseZoo.
            SparseZoo stubs should be preceded by 'zoo:', and can contain an optional
            '?recipe_type=<type>' parameter. Can also be a SparseZoo File
            object. i.e. '/path/to/local/recipe.md', 'zoo:model/stub/path',
            'zoo:model/stub/path?recipe_type=transfer'. Additionally, a raw
            yaml str is also supported in place of a file path.
        :param add_modifiers: additional modifiers that should be added to the
            returned manager alongside the ones loaded from the recipe file
        :param recipe_variables: additional arguments to override any root variables
            in the recipe with (i.e. num_epochs, init_lr)
        :param metadata: additional (to the information provided in the recipe) data
            to be preserved and utilized in the future - for reproducibility and
            completeness.
        :return: ScheduledModifierManager() created from the recipe file
        """
        recipe_variables = parse_recipe_variables(recipe_variables)
        yaml_str = load_recipe_yaml_str(file_path, **recipe_variables)
        modifiers = Modifier.load_list(yaml_str)
        if add_modifiers:
            modifiers.extend(add_modifiers)

        validated_metadata = validate_metadata(metadata, yaml_str)
        if metadata is not None:
            # stamp the torch version into the metadata for reproducibility
            validated_metadata = add_framework_metadata(
                validated_metadata, torch_version=torch.__version__
            )

        manager = ScheduledModifierManager(
            modifiers=modifiers, metadata=validated_metadata
        )

        return manager

    def __init__(
        self,
        modifiers: List[ScheduledModifier],
        metadata: Optional[Dict[str, Any]] = None,
    ):
        sparseml_analytics.send_event("python__pytorch__manager__init")
        super().__init__(modifiers=modifiers, metadata=metadata)
        # epoch passed to initialize(); used as the default start epoch in modify()
        self._initialize_epoch = 0

    def state_dict(self) -> Dict[str, Dict]:
        """
        :return: Dictionary to store any state variables for this manager.
            Includes all modifiers nested under this manager as sub keys in the
            dict (for staged recipes, nested one level deeper per stage).
        """

        def _modifiers_list_state_dict(modifiers):
            return {mod.identifier(): mod.state_dict() for mod in modifiers}

        if isinstance(self.modifiers, List):
            state_dict = _modifiers_list_state_dict(self.modifiers)
        else:
            # staged recipe: self.modifiers maps stage name -> modifier list.
            # fix: iterate .items() -- iterating the dict directly yields only
            # the stage names, so the two-target unpack raised at runtime
            state_dict = {
                stage: _modifiers_list_state_dict(modifiers)
                for stage, modifiers in self.modifiers.items()
            }

        return state_dict

    def load_state_dict(self, state_dict: Dict[str, Dict], strict: bool = True):
        """
        Loads the given state dict into this manager.
        All modifiers that match will be loaded.
        If any are missing or extra and strict=True, then will raise a KeyError

        :param state_dict: dictionary object as generated by this object's state_dict
            function
        :param strict: True to raise a KeyError for any missing or extra information
            in the state dict, False to ignore
        :raises IndexError: If any keys in the state dict do not correspond to a
            valid index for this manager and strict=True
        """
        if isinstance(self.modifiers, List):
            modifiers_index = {mod.identifier(): mod for mod in self.modifiers}
        else:
            # staged recipe: validate stage names first, then flatten all
            # stages into a single identifier -> modifier index
            if strict:
                modifiers_stages = set(self.modifiers.keys())
                state_dict_stages = set(state_dict.keys())
                diff = modifiers_stages.symmetric_difference(state_dict_stages)
                if diff:
                    raise IndexError(
                        f"Found extra stages: {state_dict_stages - modifiers_stages}"
                        f"and missing stages: {modifiers_stages - state_dict_stages}"
                    )

            modifiers_index = {}
            for stage_modifiers in self.modifiers.values():
                modifiers_index.update(
                    {mod.identifier(): mod for mod in stage_modifiers}
                )

        if strict:
            modifier_keys = set(modifiers_index.keys())
            state_dict_keys = set(state_dict.keys())
            diff = modifier_keys.symmetric_difference(state_dict_keys)
            if diff:
                raise IndexError(
                    f"Found extra keys: {state_dict_keys - modifier_keys} "
                    f"and missing keys: {modifier_keys - state_dict_keys}"
                )

        for key, val in state_dict.items():
            if key not in modifiers_index:
                # non-strict mode: silently skip unknown entries
                continue
            modifiers_index[key].load_state_dict(val)

    def apply(
        self,
        module: Module,
        epoch: float = math.inf,
        loggers: Optional[LoggerManager] = None,
        finalize: bool = True,
        **kwargs,
    ):
        """
        Applies the lifecycle of each stage in the manager/recipe
        by calling into initialize and finalize for each modifier for each stage

        :param module: the PyTorch model/module to modify
        :param epoch: the epoch to apply the modifier at, defaults to math.inf (end)
        :param loggers: Optional logger manager to log the modification process to
        :param finalize: True to invoke finalize after initialize, False otherwise.
            If training after one shot, set finalize=False to keep modifiers applied.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers (passed to initialize and finalize).
        """
        if not self.initialized:
            super().initialize(module, epoch, loggers, **kwargs)
        self._initialize_epoch = epoch

        # normalize to a list of modifier lists (one per stage for staged recipes)
        modifier_lists = (
            self._modifiers
            if isinstance(self._modifiers, List)
            else list(self._modifiers.values())
        )
        for modifier_list in modifier_lists:
            self._initialize_modifiers(
                modifier_list, module, epoch, loggers=loggers, **kwargs
            )
            if finalize:
                self._finalize_modifiers(modifier_list, module, **kwargs)

    def apply_structure(
        self,
        module: Module,
        epoch: float = 0.0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        finalize: bool = False,
        **kwargs,
    ):
        """
        Initialize/apply the modifier for a given model/module at the given epoch
        if the modifier affects the structure of the module such as
        quantization, layer pruning, or filter pruning.
        Calls into initialize(module, epoch, loggers, **kwargs) if structured.

        :param module: the PyTorch model/module to modify
        :param epoch: the epoch to apply the modifier at, defaults to 0.0 (start)
        :param loggers: Optional logger manager to log the modification process to
        :param finalize: True to invoke finalize after initialize, False otherwise.
            Set finalize to True and epoch to math.inf for one shot application.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers (passed to initialize and finalize).
        """
        self._initialize_epoch = epoch
        for mod in self.iter_modifiers():
            mod.apply_structure(module, epoch, loggers, finalize, **kwargs)

    def initialize(
        self,
        module: Module,
        epoch: float = 0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        **kwargs,
    ):
        """
        Handles any initialization of the manager for the given model/module.
        epoch and steps_per_epoch can optionally be passed in to initialize the
        manager and module at a specific point in the training process.
        If loggers is not None, will additionally call initialize_loggers.

        :param module: the PyTorch model/module to modify
        :param epoch: The epoch to initialize the manager and module at.
            Defaults to 0 (start of the training process)
        :param loggers: Optional logger manager to log the modification process to
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers.
        """
        super().initialize(module, epoch, loggers, **kwargs)
        self._initialize_epoch = epoch
        self._initialize_modifiers(
            self.iter_modifiers(), module, epoch, loggers, **kwargs
        )

    def initialize_loggers(self, loggers: Union[None, LoggerManager, List[BaseLogger]]):
        """
        Handles initializing and setting up the loggers for the contained modifiers.

        :param loggers: the logger manager to setup this manager with for logging
            important info and milestones to
        """
        super().initialize_loggers(loggers)
        for mod in self.iter_modifiers():
            mod.initialize_loggers(self.loggers)

    def modify(
        self,
        module: Module,
        optimizer: Optimizer,
        steps_per_epoch: int,
        wrap_optim: Any = None,
        epoch: float = None,
        allow_parallel_module: bool = True,
        **kwargs,
    ) -> RecipeManagerStepWrapper:
        """
        Modify the given module and optimizer for training aware algorithms such as
        pruning and quantization.
        Initialize must be called first.
        After training is complete, finalize should be called.

        :param module: The model/module to modify
        :param optimizer: The optimizer to modify
        :param steps_per_epoch: The number of optimizer steps (batches) in each epoch
        :param wrap_optim: Optional object to wrap instead of the optimizer.
            Useful for cases like amp (fp16 training) where a it should be wrapped
            in place of the original optimizer since it doesn't always call into
            the optimizer.step() function.
        :param epoch: Optional epoch that can be passed in to start modifying at.
            Defaults to the epoch that was supplied to the initialize function.
        :param allow_parallel_module: if False, a DataParallel or
            DistributedDataParallel module passed to this function will be unwrapped
            to its base module during recipe initialization by referencing
            module.module. This is useful so a recipe may reference the base module
            parameters instead of the wrapped distributed ones. Set to True to not
            unwrap the distributed module. Default is True
        :param kwargs: Key word arguments that are passed to the initialize call
            if initialize has not been called yet
        :return: A wrapped optimizer object. The wrapped object makes all the
            original properties for the wrapped object available so it can be
            used without any additional code changes.
        """
        if epoch is None:
            epoch = self._initialize_epoch

        # fix: the original condition `is_parallel_model(module) and not
        # allow_parallel_module` made the warning branch below unreachable
        # (the inner `if allow_parallel_module` could never be True there).
        # Check for parallelism first, then branch on allow_parallel_module.
        if is_parallel_model(module):
            if allow_parallel_module:
                _LOGGER.warning(
                    "Parallel module detected by ScheduledModifierManager. Note that "
                    "the base module parameters will be prefixed by 'module.' which "
                    "may lead to matching issues if unaccounted for in recipe. Run "
                    "modify() with allow_parallel_module=False to unwrap the parallel "
                    "module during recipe initialization"
                )
            else:
                _LOGGER.info("Unwrapping parallel module for recipe initialization")
                module = module.module  # unwrap parallel module

        if not self.initialized:
            self.initialize(module, epoch, **kwargs)

        if wrap_optim is None:
            wrap_optim = optimizer

        return RecipeManagerStepWrapper(
            wrap_optim, optimizer, module, self, epoch, steps_per_epoch
        )

    def finalize(
        self, module: Optional[Module] = None, reset_loggers: bool = True, **kwargs
    ):
        """
        Handles any finalization of the modifier for the given model/module.
        Applies any remaining logic and cleans up any hooks or attachments to the
        model.

        :param module: The model/module to finalize the modifier for.
            Marked optional so state can still be cleaned up on delete,
            but generally should always be passed in.
        :param reset_loggers: True to remove any currently attached loggers (default),
            False to keep the loggers attached.
        :param kwargs: Optional kwargs to support specific arguments
            for individual modifiers.
        """
        super().finalize(module, reset_loggers, **kwargs)
        self._finalize_modifiers(self.iter_modifiers(), module, reset_loggers, **kwargs)

    def update(
        self,
        module: Module,
        optimizer: Optimizer,
        epoch: float,
        steps_per_epoch: int,
        log_updates: bool = True,
    ):
        """
        Handles updating the contained modifiers' states, module, or optimizer
        Only calls scheduled_update on the each modifier if modifier.update_ready()

        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        :param log_updates: True to log the updates for each modifier to the loggers,
            False to skip logging
        """
        super().update(module, optimizer, epoch, steps_per_epoch)

        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue

            if mod.update_ready(epoch, steps_per_epoch):
                mod.scheduled_update(module, optimizer, epoch, steps_per_epoch)

            if log_updates:
                mod.scheduled_log_update(module, optimizer, epoch, steps_per_epoch)

    def loss_update(
        self,
        loss: Tensor,
        module: Module,
        optimizer: Optimizer,
        epoch: float,
        steps_per_epoch: int,
        **kwargs,
    ) -> Tensor:
        """
        Optional call that can be made on the optimizer to update the contained
        modifiers once loss has been calculated

        :param loss: The calculated loss tensor
        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        :return: the modified loss tensor
        """
        super().loss_update(loss, module, optimizer, epoch, steps_per_epoch, **kwargs)

        # chain the loss through every enabled modifier so each may rescale it
        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            loss = mod.loss_update(
                loss,
                module,
                optimizer,
                epoch=epoch,
                steps_per_epoch=steps_per_epoch,
                **kwargs,
            )

        return loss

    def optimizer_pre_step(
        self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
    ):
        """
        Called before the optimizer step happens (after backward has been called,
        before optimizer.step)
        Calls into the contained modifiers

        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        """
        super().optimizer_pre_step(module, optimizer, epoch, steps_per_epoch)

        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            mod.optimizer_pre_step(module, optimizer, epoch, steps_per_epoch)

    def optimizer_post_step(
        self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
    ):
        """
        Called after the optimizer step happens and weights have updated
        Calls into the contained modifiers

        :param module: module to modify
        :param optimizer: optimizer to modify
        :param epoch: current epoch and progress within the current epoch
        :param steps_per_epoch: number of steps taken within each epoch
            (calculate batch number using this and epoch)
        """
        super().optimizer_post_step(module, optimizer, epoch, steps_per_epoch)

        for mod in self.iter_modifiers():
            if not mod.enabled:
                continue
            mod.optimizer_post_step(module, optimizer, epoch, steps_per_epoch)

    def _initialize_modifiers(
        self,
        modifiers: Iterable[Modifier],
        module: Module,
        epoch: float = 0,
        loggers: Union[None, LoggerManager, List[BaseLogger]] = None,
        **kwargs,
    ):
        # accept either a single Modifier or an iterable of them
        if isinstance(modifiers, Modifier):
            modifiers = [modifiers]

        for mod in modifiers:
            if mod.initialized:
                # check in case modifier was initialized from apply_structure
                continue
            mod.initialize(module, epoch, loggers, **kwargs)

    def _finalize_modifiers(
        self,
        modifiers: Iterable[Modifier],
        module: Optional[Module] = None,
        reset_loggers: bool = True,
        **kwargs,
    ):
        # accept either a single Modifier or an iterable of them
        if isinstance(modifiers, Modifier):
            modifiers = [modifiers]

        for mod in modifiers:
            mod.finalize(module, reset_loggers, **kwargs)
def _load_managers_and_weights_from_checkpoint(
    recipe: str, model: torch.nn.Module, checkpoint: Optional[Dict]
) -> Tuple[ScheduledModifierManager, ScheduledModifierManager]:
    """
    Build the configured recipe manager and, when resuming from a checkpoint
    that embeds its own recipe, a checkpoint manager; restore model weights.

    :param recipe: path/stub of the configured SparseML recipe
    :param model: model whose structure/weights are restored
    :param checkpoint: optional checkpoint dict with 'state_dict' and possibly
        'checkpoint_recipe'/'epoch' entries
    :return: tuple of (active manager, checkpoint manager or None)
    """
    manager = ScheduledModifierManager.from_yaml(recipe)
    if checkpoint is None:
        return manager, None

    if "checkpoint_recipe" not in checkpoint:
        LOG.info(f"No checkpoint recipe in checkpoint: {list(checkpoint.keys())}")
        manager.initialize(model)
        return manager, None

    LOG.info("Found recipe in checkpoint")
    checkpoint_manager = ScheduledModifierManager.from_yaml(
        checkpoint["checkpoint_recipe"]
    )
    checkpoint_epoch = checkpoint["epoch"]
    if checkpoint_epoch == -1:
        # restore state from finished recipe
        LOG.info(
            "Checkpoint was from epoch -1, "
            "checkpoint recipe is NOT overriding configured recipe"
        )
        checkpoint_manager.apply_structure(model, epoch=checkpoint_epoch)
    else:
        # resume
        LOG.info(
            "Checkpoint is a resume checkpoint (epoch > 0), "
            "checkpoint recipe is overriding configured recipe"
        )
        checkpoint_manager.initialize(model, epoch=checkpoint_epoch)
        # NOTE: override manager with the checkpoint's manager
        manager = checkpoint_manager
        checkpoint_manager = None

    # just load state dict from zoo stub
    model.load_state_dict(checkpoint["state_dict"])
    return manager, checkpoint_manager
21,397 | import functools
from typing import Optional
from sparseml.base import check_version
_ONNX_MIN_VERSION = "1.5.0"
def check_onnx_install(
    min_version: Optional[str] = _ONNX_MIN_VERSION,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the onnx package is installed and within the optional
    [min_version, max_version] bounds.

    If raise_on_error, raises an ImportError when onnx is missing or outside
    the requested version range; otherwise returns False in those cases and
    True when everything is correct.

    :param min_version: The minimum version for onnx that it must be greater than
        or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for onnx that it must be less than
        or equal to, if unset will require no maximum version.
    :type max_version: str
    :param raise_on_error: True to raise any issues such as not installed,
        minimum version, or maximum version as ImportError. False to return the
        result.
    :type raise_on_error: bool
    :return: True when onnx is installed within the accepted bounds, False
        otherwise (when raise_on_error is False).
    :rtype: bool
    """
    # onnx imported cleanly at module load -- only the version range remains
    if onnx_err is None:
        return check_version("onnx", min_version, max_version, raise_on_error)

    # import failed; surface or report the stored import error
    if raise_on_error:
        raise onnx_err
    return False
The provided code snippet includes necessary dependencies for implementing the `require_onnx` function. Write a Python function `def require_onnx( min_version: Optional[str] = _ONNX_MIN_VERSION, max_version: Optional[str] = None )` to solve the following problem:
Decorator function to require use of onnx. Will check that onnx package is installed and within the bounding ranges of min_version and max_version if they are set before calling the wrapped function. See :func:`check_onnx_install` for more info. :param min_version: The minimum version for onnx that it must be greater than or equal to, if unset will require no minimum version :type min_version: str :param max_version: The maximum version for onnx that it must be less than or equal to, if unset will require no maximum version. :type max_version: str
Here is the function:
def require_onnx(
    min_version: Optional[str] = _ONNX_MIN_VERSION, max_version: Optional[str] = None
):
    """
    Decorator function to require use of onnx.
    Will check that onnx package is installed and within the bounding
    ranges of min_version and max_version if they are set before calling
    the wrapped function.
    See :func:`check_onnx_install` for more info.

    :param min_version: The minimum version for onnx that it must be greater than
        or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for onnx that it must be less than
        or equal to, if unset will require no maximum version.
    :type max_version: str
    """

    def _decorator(func):
        # functools.wraps preserves the wrapped function's name/docstring
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            # validate the onnx install on every call before delegating
            check_onnx_install(min_version, max_version)
            return func(*args, **kwargs)

        return _wrapper

    return _decorator
21,398 | import functools
from typing import Optional
from sparseml.base import check_version
_ORT_MIN_VERSION = "1.0.0"
def check_onnxruntime_install(
    min_version: Optional[str] = _ORT_MIN_VERSION,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the onnxruntime package is installed and within the optional
    [min_version, max_version] bounds.

    If raise_on_error, raises an ImportError when onnxruntime is missing or
    outside the requested version range; otherwise returns False in those
    cases and True when everything is correct.

    :param min_version: The minimum version for onnxruntime that it must be greater
        than or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for onnxruntime that it must be less
        than or equal to, if unset will require no maximum version.
    :type max_version: str
    :param raise_on_error: True to raise any issues such as not installed,
        minimum version, or maximum version as ImportError. False to return the
        result.
    :type raise_on_error: bool
    :return: True when onnxruntime is installed within the accepted bounds,
        False otherwise (when raise_on_error is False).
    :rtype: bool
    """
    # onnxruntime imported cleanly at module load -- only the version range remains
    if onnxruntime_err is None:
        return check_version(
            "onnxruntime",
            min_version,
            max_version,
            raise_on_error,
            extra_error_message="Try installing sparseml[onnxruntime] or onnxruntime",
        )

    # import failed; surface or report the stored import error
    if raise_on_error:
        raise onnxruntime_err
    return False
The provided code snippet includes necessary dependencies for implementing the `require_onnxruntime` function. Write a Python function `def require_onnxruntime( min_version: Optional[str] = _ORT_MIN_VERSION, max_version: Optional[str] = None )` to solve the following problem:
Decorator function to require use of onnxruntime. Will check that onnxruntime package is installed and within the bounding ranges of min_version and max_version if they are set before calling the wrapped function. See :func:`check_onnxruntime_install` for more info. :param min_version: The minimum version for onnxruntime that it must be greater than or equal to, if unset will require no minimum version :type min_version: str :param max_version: The maximum version for onnxruntime that it must be less than or equal to, if unset will require no maximum version. :type max_version: str
Here is the function:
def require_onnxruntime(
    min_version: Optional[str] = _ORT_MIN_VERSION, max_version: Optional[str] = None
):
    """
    Decorator function to require use of onnxruntime.
    Will check that onnxruntime package is installed and within the bounding
    ranges of min_version and max_version if they are set before calling
    the wrapped function.
    See :func:`check_onnxruntime_install` for more info.

    :param min_version: The minimum version for onnxruntime that it must be greater
        than or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for onnxruntime that it must be less
        than or equal to, if unset will require no maximum version.
    :type max_version: str
    """

    def _decorator(func):
        # functools.wraps preserves the wrapped function's name/docstring
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            # validate the onnxruntime install on every call before delegating
            check_onnxruntime_install(min_version, max_version)
            return func(*args, **kwargs)

        return _wrapper

    return _decorator
21,399 | import logging
from typing import Any, Dict, Generator, List, Optional, Set, Tuple
import numpy
from onnx import ModelProto, numpy_helper
from sparseml.onnx.utils import DataLoader, DeepSparseAnalyzeModelRunner, ONNXGraph
from sparseml.optim import default_pruning_sparsities_perf
from sparseml.sparsification import Analyzer, AnalyzerProgress, ModelInfo
from sparseml.sparsification import (
PruningLossSensitivityMagnitudeAnalyzer as BasePruningLossMagnitudeAnalyzer,
)
from sparseml.sparsification import (
PruningSensitivityResult,
PruningSensitivityResultTypes,
)
class PruningLossSensitivityMagnitudeAnalyzer(BasePruningLossMagnitudeAnalyzer):
    """
    Class for performing weight magnitude pruning sensitivity analysis on ONNX
    models.

    pruning_loss_analysis_sparsity_levels is an optional run argument to set the
    sparsities that this analysis will run at. if not set, the value defaults to
    sparseml.optim.default_pruning_sparsities_loss(extended=True)
    """

    # NOTE(review): takes no `self` -- likely a @staticmethod whose decorator was
    # stripped during snippet extraction; confirm against the base class
    def validate_model(prunable_param_names: Set[str], model: ModelProto) -> bool:
        """
        Validates that all prunable parameter names in the ModelInfo layer_info
        exist in the given model and that the given model is of the correct
        framework.

        :param prunable_param_names: set of prunable parameter names found in the
            model info
        :param model: model to validate
        :return: True if this is a valid model for weight magnitude pruning
            analysis. False otherwise
        """
        return _validate_onnx_model_analyzer(prunable_param_names, model)

    def get_named_prunable_params(self, model: Any) -> Dict[str, numpy.ndarray]:
        """
        loads the prunable parameters in a standardized way so that weight
        magnitude analysis may be run on each

        :param model: model to load the prunable parameters from
        :return: dictionary of prunable parameter name as listed in the ModelInfo
            to a numpy array of the values of the parameter
        """
        graph = ONNXGraph(model)
        # pull each prunable layer's initializer out of the graph as numpy values
        return {
            layer_name: numpy_helper.to_array(graph.get_init_by_name(layer_name, False))
            for layer_name, layer_info in self._model_info.layer_info.items()
            if layer_info.prunable
        }
class PruningPerformanceSensitivityAnalyzer(Analyzer):
"""
Class for running pruning performance sensitivity analysis on a model against
the DeepSparse engine. deepsparse must be installed to be available.
pruning_perf_analysis_sparsity_levels is an optional run argument to set the
sparisities that this analysis will run at. if not set, the value defaults to
sparsml.optim.default_pruning_sparsities_perf()
:param model_info: ModelInfo object of the model to be analyzed. after
running this analysis, the analysis_results of this ModelInfo object
will be updated
:param batch_size: batch size to run analysis at. Default is 1
:param num_cores: number of CPU cores to run analysis with. Default
is all available on the system
:param iterations_per_check: number of benchmarking iterations
to run for each sparsity level. Default is 10
:param warmup_iterations_per_check: number of warmup iterations
to run at each saprsity level. Default is 5
"""
    def __init__(
        self,
        model_info: ModelInfo,
        batch_size: int = 1,
        num_cores: Optional[int] = None,
        iterations_per_check: int = 10,
        warmup_iterations_per_check: int = 5,
    ):
        """
        Store the benchmark configuration and resolve the core count before
        delegating to the base Analyzer.

        :param model_info: ModelInfo object of the model to be analyzed
        :param batch_size: batch size to run analysis at. Default is 1
        :param num_cores: number of CPU cores to run analysis with; defaults to
            all cores reported by deepsparse when available
        :param iterations_per_check: benchmarking iterations per sparsity level
        :param warmup_iterations_per_check: warmup iterations per sparsity level
        """
        self._batch_size = batch_size
        self._iterations_per_check = iterations_per_check
        self._warmup_iterations_per_check = warmup_iterations_per_check

        # try grabbing default max cores if needed; for tracking purposes
        try:
            from deepsparse.cpu import cpu_details

            self._num_cores = num_cores or cpu_details()[0]
        except Exception:
            # deepsparse unavailable -- keep whatever the caller passed (may be None)
            self._num_cores = num_cores

        super().__init__(model_info)
def available(cls, model_info: ModelInfo, **kwargs) -> bool:
"""
Determines if given the available kwargs and ModelInfo, that pruning
performance analysis wioth deepsparse is available. `model` must exist in
the given keyword arguments and be an onnx ModelProto with all prunable
parameters from the ModelInfo available in its initializers list. Additionally
deepsparse must be installed and the DeepSparseAnalyzeModelRunner must be
available
:param model_info: ModelInfo object of the model to be analyzed
:param kwargs: keyword arguments that will be passed in to this analysis. model
must be included for this analysis to be available
:return: True if given the inputs, this analyzer can run its analysis. False
otherwise
"""
if "model" not in kwargs or not DeepSparseAnalyzeModelRunner.available():
return False
return _validate_onnx_model_analyzer(
model_info.get_prunable_param_names(), kwargs["model"]
)
def _initialize_result(self) -> PruningSensitivityResult:
return PruningSensitivityResult(
PruningSensitivityResultTypes.PERF,
attributes=dict(
batch_size=self._batch_size,
num_cores=self._num_cores,
iterations_per_check=self._iterations_per_check,
warmup_iterations_per_check=self._warmup_iterations_per_check,
),
)
def _run_iter(
self,
**kwargs,
) -> Generator[Tuple[AnalyzerProgress, PruningSensitivityResult], None, None]:
sparsity_levels = (
kwargs["pruning_perf_analysis_sparsity_levels"]
if "pruning_perf_analysis_sparsity_levels" in kwargs
else default_pruning_sparsities_perf()
)
num_steps = len(sparsity_levels)
model = kwargs["model"]
data_loader = DataLoader.from_model_random(model, self._batch_size, -1)
# build map of possible layer identifiers to prunable param name
id_to_param_name = {}
param_names = self._model_info.get_prunable_param_names()
for param_name in param_names:
layer_info = self._model_info.layer_info[param_name]
# by output id
output_id = layer_info.attributes.get("node_output_id")
if output_id is not None:
id_to_param_name[output_id] = param_name
# by node name
node_name = layer_info.attributes.get("node_name")
if node_name is not None:
id_to_param_name[node_name] = param_name
# directly match to param name
id_to_param_name[param_name] = param_names
runner = DeepSparseAnalyzeModelRunner(model, self._batch_size, self._num_cores)
for idx, sparsity in enumerate(sparsity_levels):
if sparsity <= 1e-9:
sparsity = None # to enforce dense execution
yield AnalyzerProgress(step=idx, total_steps=num_steps), self.result
results = runner.run(
data_loader,
show_progress=False,
num_iterations=self._iterations_per_check,
num_warmup_iterations=self._warmup_iterations_per_check,
imposed_ks=sparsity,
max_steps=1,
)[0][0]
_LOGGER.debug(
"measured perf results for one shot sparsity {}".format(sparsity)
)
# model sparsity -> average time in seconds
self.result.add_model_sparsity_result(
sparsity or 0.0, results["average_total_time"] / 1000.0
)
for layer in results["layer_info"]:
layer_name = id_to_param_name.get(
layer["canonical_name"],
id_to_param_name.get(layer["name"]), # fallback to internal name
)
if layer_name is not None:
self.result.add_layer_sparsity_result(
layer_name,
sparsity if sparsity is not None else 0.0,
layer["average_run_time_in_ms"] / 1000.0,
)
yield AnalyzerProgress(step=num_steps, total_steps=num_steps), self.result
The provided code snippet includes the necessary dependencies for implementing the `get_analyzer_impls` function. Write a Python function `def get_analyzer_impls() -> List[Analyzer]` that solves the following problem:
:return: list of ONNX Analyzer implementations
Here is the function:
def get_analyzer_impls() -> List[type]:
    """
    :return: list of ONNX Analyzer implementation classes (the classes
        themselves, not instances; callers are expected to instantiate them)
    """
    # fixed annotation: the list holds Analyzer subclasses, not Analyzer
    # instances, so List[Analyzer] was incorrect
    return [
        PruningLossSensitivityMagnitudeAnalyzer,
        PruningPerformanceSensitivityAnalyzer,
    ]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.