id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,200 | import logging
import math
import os
from abc import ABC, abstractmethod
from functools import wraps
from typing import Any, Dict, List, Optional, Union
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Module, Parameter
from torch.nn.parallel.parallel_apply import parallel_apply
import GPUtil
from sparseml.pytorch.sparsification.modifier import ModifierProp, PyTorchModifierYAML
from sparseml.pytorch.sparsification.pruning.mask_creator import (
PruningMaskCreator,
get_mask_creator_default,
)
from sparseml.pytorch.sparsification.pruning.modifier_pruning_base import (
BaseGradualPruningModifier,
)
from sparseml.pytorch.sparsification.pruning.scorer import PruningParamsGradScorer
from sparseml.pytorch.utils import GradSampler
from sparseml.pytorch.utils.logger import BaseLogger
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `cache_gpu_mem_return` function. Write a Python function `def cache_gpu_mem_return(func)` to solve the following problem:
Cache previous return of GPUtil to be re-used in case future GPUtil call fails to detect available devices.
Here is the function:
def cache_gpu_mem_return(func):
    """
    Decorator caching the previous return of a GPUtil memory query so it can
    be re-used (scaled down for a safety margin) if a future call fails to
    detect the available devices.

    :param func: function querying GPU available memory; called as
        ``func(device_idx, clear_cache)`` and expected to return a list of
        per-device memory values
    :return: wrapped function with the caching fallback behavior
    """
    prev_return = {}
    safety_scale = 0.8  # cached reads are scaled down for a safety margin

    @wraps(func)
    def cached_gpu_mem_func(device_idx=None, clear_cache=True):
        # None (not a mutable default list) stands for "all devices" so the
        # default is not shared across calls
        if device_idx is None:
            device_idx = []
        key = str(device_idx)
        try:
            prev_return[key] = func(device_idx, clear_cache)
            return prev_return[key]
        except Exception:
            _LOGGER.warning(
                f"[M-FAC] Failed to get GPU available memory. Using previous memory "
                f" read scaled down to {safety_scale*100:.2f}% for a safety margin"
            )
            if key not in prev_return:
                _LOGGER.warning(
                    "[M-FAC] No cached memory usage found for this set of GPUs. "
                    "Defaulting to CPU for M-FAC calculations"
                )
                return []
            return [mem * safety_scale for mem in prev_return[key]]

    return cached_gpu_mem_func
21,201 | import logging
import math
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.sparsification.modifier import ModifierProp, PyTorchModifierYAML
from sparseml.pytorch.sparsification.pruning.mask_creator import (
PruningMaskCreator,
get_mask_creator_default,
)
from sparseml.pytorch.sparsification.pruning.modifier_pruning_base import (
BaseGradualPruningModifier,
)
from sparseml.pytorch.sparsification.pruning.scorer import PruningParamsGradScorer
from sparseml.pytorch.utils import GradSampler
from sparseml.pytorch.utils.logger import BaseLogger
from sparseml.utils import FROM_PARAM_TOKEN
The provided code snippet includes necessary dependencies for implementing the `cosine_schedule` function. Write a Python function `def cosine_schedule(t: float, t_max: float, init_value: float, end_value: float)` to solve the following problem:
Cosine interpolation from init_value to end_value given by the law: f(t) = end_value + (1/2) * (init_value - end_value) (1 + cos(pi t / t_max)) :param t: current timestep :param t_max: maximal timestep :param init_value: initial value :param end_value: final value at t_max
Here is the function:
def cosine_schedule(t: float, t_max: float, init_value: float, end_value: float):
    """
    Cosine interpolation from init_value to end_value given by the law:
    f(t) = end_value + (1/2) * (init_value - end_value) * (1 + cos(pi * t / t_max))

    :param t: current timestep
    :param t_max: maximal timestep
    :param init_value: value at t = 0
    :param end_value: final value at t = t_max
    """
    # cosine factor decays smoothly from 1 (at t=0) to 0 (at t=t_max)
    cosine_factor = 0.5 * (1 + math.cos(math.pi * t / t_max))
    return end_value + (init_value - end_value) * cosine_factor
21,202 | import logging
import math
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim.optimizer import Optimizer
from sparseml.pytorch.sparsification.modifier import ModifierProp, PyTorchModifierYAML
from sparseml.pytorch.sparsification.pruning.mask_creator import (
PruningMaskCreator,
get_mask_creator_default,
)
from sparseml.pytorch.sparsification.pruning.modifier_pruning_base import (
BaseGradualPruningModifier,
)
from sparseml.pytorch.sparsification.pruning.scorer import PruningParamsGradScorer
from sparseml.pytorch.utils import GradSampler
from sparseml.pytorch.utils.logger import BaseLogger
from sparseml.utils import FROM_PARAM_TOKEN
The provided code snippet includes necessary dependencies for implementing the `threshold_fraction` function. Write a Python function `def threshold_fraction(tensor: Tensor, fraction: float) -> None` to solve the following problem:
A function returning the tensor with all but topk fraction elements set to 0. :param tensor: the input tensor :param fraction: fraction of nonzero elements
Here is the function:
def threshold_fraction(tensor: Tensor, fraction: float) -> Tensor:
    """
    Return a binary (0/1) mask over ``tensor`` keeping only the elements
    strictly greater than its ``fraction``-quantile value, i.e. roughly the
    smallest ``round(fraction * numel)`` elements are zeroed out.

    NOTE(review): the original annotation said ``-> None`` but a tensor is
    always returned; fixed. When ``fraction`` rounds to zero elements the raw
    input tensor (not a mask) is returned — callers appear to rely on this.

    :param tensor: the input tensor
    :param fraction: fraction of the smallest elements to drop
    :return: binary mask tensor, or the unmodified input when fraction rounds
        to zero elements
    """
    lookup_idx = round(fraction * tensor.numel())
    if lookup_idx == 0:
        # nothing to threshold; pass the tensor through unchanged
        return tensor
    # value of the lookup_idx-th smallest element acts as the cutoff
    threshold, _ = torch.kthvalue(tensor.reshape(-1), k=lookup_idx)
    return torch.where(tensor > threshold, 1.0, 0.0)
21,203 | import logging
from copy import deepcopy
from typing import Dict, List, Optional, Union
import torch
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
from sparseml.optim import ModifierProp
from sparseml.pytorch.sparsification.distillation.modifier_distillation_base import (
BaseDistillationModifier,
)
from sparseml.pytorch.sparsification.modifier import PyTorchModifierYAML
from sparseml.pytorch.utils import BaseLogger
def _create_cache_output_hook(
layer_name: str,
outputs: Dict[str, torch.Tensor],
outputs_shape: Dict[str, torch.Size],
):
def forward_hook_fn(layer, inp, out):
outputs[layer_name] = out
if layer_name not in outputs_shape:
outputs_shape[layer_name] = out.shape
return forward_hook_fn | null |
21,204 | import logging
from copy import deepcopy
from typing import Dict, List, Optional, Union
import torch
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
from sparseml.optim import ModifierProp
from sparseml.pytorch.sparsification.distillation.modifier_distillation_base import (
BaseDistillationModifier,
)
from sparseml.pytorch.sparsification.modifier import PyTorchModifierYAML
from sparseml.pytorch.utils import BaseLogger
_DISTILLATION_TYPES = [torch.nn.Conv2d, torch.nn.Linear]
def _update_layers_by_type(
    layer_module: torch.nn.Module,
    cached_layers: Dict[str, torch.nn.Module],
    name: str = "",
):
    # Recursively collect every submodule whose exact type is in
    # _DISTILLATION_TYPES, keyed by its dotted path within the root module.
    if type(layer_module) in _DISTILLATION_TYPES:
        cached_layers[name] = layer_module
    for child_name, child in layer_module.named_children():
        child_path = child_name if name == "" else name + "." + child_name
        _update_layers_by_type(child, cached_layers, child_path)
21,205 | import logging
from copy import deepcopy
from typing import Dict, List, Optional, Union
import torch
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
from sparseml.optim import ModifierProp
from sparseml.pytorch.sparsification.distillation.modifier_distillation_base import (
BaseDistillationModifier,
)
from sparseml.pytorch.sparsification.modifier import PyTorchModifierYAML
from sparseml.pytorch.utils import BaseLogger
def _update_layers_by_name(
layer_module: torch.nn.Module,
layer_names: List[str],
cached_layers: Dict[str, torch.nn.Module],
name: str = "",
):
if name in layer_names:
cached_layers[name] = layer_module
for layer_module, child in layer_module.named_children():
_update_layers_by_name(
child,
layer_names,
cached_layers,
name + "." + layer_module if name != "" else layer_module,
) | null |
21,206 | import logging
from copy import deepcopy
from typing import Dict, List, Optional, Union
import torch
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
from sparseml.optim import ModifierProp
from sparseml.pytorch.sparsification.distillation.modifier_distillation_base import (
BaseDistillationModifier,
)
from sparseml.pytorch.sparsification.modifier import PyTorchModifierYAML
from sparseml.pytorch.utils import BaseLogger
DISTILL_PARAM_GROUP_KEY = "distillation_projection_params"
The provided code snippet includes necessary dependencies for implementing the `_get_projection_param_group_idx` function. Write a Python function `def _get_projection_param_group_idx(param_groups: List[Dict]) -> Optional[int]` to solve the following problem:
:return: Optional index where the param group is if it is found.
Here is the function:
def _get_projection_param_group_idx(param_groups: List[Dict]) -> Optional[int]:
    """
    :return: Optional index where the param group is if it is found.
    """
    # first group carrying the distillation projection marker key, else None
    return next(
        (
            idx
            for idx, group in enumerate(param_groups)
            if DISTILL_PARAM_GROUP_KEY in group
        ),
        None,
    )
21,207 | import logging
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import (
ScheduledModifier,
ScheduledUpdateModifier,
)
from sparseml.pytorch.utils import BaseLogger, device_of, tensors_module_forward
from sparseml.sparsification import SparsificationTypes
def kl_logsoftmax(
    x: Tensor, y: Tensor, temperature: Union[float, Tensor], dim: int = -1
) -> Tensor:
    """
    Temperature-scaled KL divergence between the softmax distributions of two
    logit tensors, normalized by the number of distributions.

    :param x: student logits
    :param y: teacher logits; must have the same shape as x
    :param temperature: softmax temperature; loss is rescaled by
        temperature**2 to keep gradient magnitudes comparable across
        temperatures
    :param dim: dimension along which the softmax is taken
    :return: scalar KL divergence loss
    :raises ValueError: if x and y have different shapes
    """
    # validate before any size-dependent computation
    if x.shape != y.shape:
        raise ValueError(
            "The teacher/student outputs must be of the same shape for distillation"
            f" but got Tensors of size {x.shape} and {y.shape}"
        )
    # number of probability distributions contained in the batch
    number_items = x.numel() / y.size(dim)
    return (
        TF.kl_div(
            input=TF.log_softmax(x / temperature, dim=dim),
            target=TF.log_softmax(y / temperature, dim=dim),
            log_target=True,
            reduction="sum",
        )
        * (temperature**2)
        / number_items
    )
def kldiv_loss(
    student_outputs,
    teacher_outputs,
    temperature,
    output_keys=None,
    dim=-1,
):
    """
    Average KL distillation loss over every selected output head.

    Supports a single tensor, a mapping of named heads, or a sequence of
    heads; ``output_keys`` optionally restricts which heads contribute.
    Returns 0.0 when there are no heads to compare.
    """
    head_losses = []
    if isinstance(student_outputs, Tensor):
        head_losses.append(
            kl_logsoftmax(student_outputs, teacher_outputs, temperature, dim)
        )
    elif isinstance(student_outputs, Mapping):
        keys = output_keys or student_outputs
        head_losses.extend(
            kl_logsoftmax(student_outputs[key], teacher_outputs[key], temperature, dim)
            for key in keys
        )
    elif isinstance(student_outputs, Iterable):
        idxs = output_keys or range(len(student_outputs))
        head_losses.extend(
            kl_logsoftmax(student_outputs[idx], teacher_outputs[idx], temperature, dim)
            for idx in idxs
        )
    if not head_losses:
        return 0.0
    return sum(head_losses) / len(head_losses)
21,208 | from typing import List, Union
from torch.nn import Module
from torch.optim import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import (
PyTorchModifierYAML,
ScheduledModifier,
)
from sparseml.pytorch.utils import BaseLogger
from sparseml.sparsification import SparsificationTypes
from sparseml.utils import convert_to_bool
def _log_weight_decay(
value: float, loggers: List[BaseLogger], epoch: float, steps_per_epoch: int
):
step = round(epoch) if steps_per_epoch <= 0 else round(epoch * steps_per_epoch)
loggers.log_scalar("Modifier Weight Decay", value, step) | null |
21,209 | import logging
from typing import Dict, List, Optional
import torch
from torch import Tensor
from torch.nn import Module, Parameter
from torch.optim import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import (
PyTorchModifierYAML,
ScheduledModifier,
)
from sparseml.sparsification import SparsificationTypes
_LOGGER = logging.getLogger(__name__)
def _module_name_from_param_name(param_name: str) -> str:
return ".".join(param_name.split(".")[:-1])
def _find_pruned_dims(param: Tensor, prune_dim: int) -> Tensor:
# return bool tensor of size num_target_channels s.t. an element is True if all
# values in the corresponding channel have been pruned
num_channels = param.size(prune_dim)
target_channel_grouped_vals = param.transpose(0, prune_dim).reshape(
num_channels, -1
)
return torch.all(target_channel_grouped_vals == 0.0, dim=1)
def _compress_module_param_dim(
param: Parameter,
target_dim: int,
idxs_to_keep: Tensor,
module: Optional[Module] = None,
optimizer: Optional[Optimizer] = None,
):
if param.dim() == 1:
target_dim = 0
if param.size(target_dim) == 1 and idxs_to_keep.numel() > 1:
# DW Conv
return
if param.size(target_dim) % idxs_to_keep.size(0) != 0:
_LOGGER.debug("skipping compression of parameter due to shape incompatibility")
stride = param.data.size(target_dim) // idxs_to_keep.size(0)
if stride > 1:
idxs_to_keep = idxs_to_keep.reshape(-1, 1).expand(-1, stride).reshape(-1)
param.data = (
param.data[idxs_to_keep, ...]
if target_dim == 0
else param.data[:, idxs_to_keep, ...]
)
if param.grad is not None:
param.grad = (
param.grad[idxs_to_keep, ...]
if target_dim == 0
else param.grad[:, idxs_to_keep, ...]
)
if (
optimizer is not None
and param in optimizer.state
and ("momentum_buffer" in optimizer.state[param])
):
optimizer.state[param]["momentum_buffer"] = (
optimizer.state[param]["momentum_buffer"][idxs_to_keep, ...]
if target_dim == 0
else optimizer.state[param]["momentum_buffer"][:, idxs_to_keep, ...]
)
# update module attrs
if module is not None:
# Batch Norm
if param.dim() == 1:
if hasattr(module, "num_features"):
module.num_features = param.size(0)
# BN running mean and var are not stored as Parameters so we must
# update them here
if hasattr(module, "running_mean") and (
module.running_mean.size(0) == idxs_to_keep.size(0)
):
module.running_mean = module.running_mean[idxs_to_keep]
if hasattr(module, "running_var") and (
module.running_var.size(0) == idxs_to_keep.size(0)
):
module.running_var = module.running_var[idxs_to_keep]
# Linear
elif target_dim == 0 and hasattr(module, "out_features"):
module.out_features = param.size(0)
elif target_dim == 1 and hasattr(module, "in_features"):
module.in_features = param.size(1)
# Conv
elif target_dim == 0 and hasattr(module, "out_channels"):
module.out_channels = param.size(0)
elif target_dim == 1 and hasattr(module, "in_channels"):
module.in_channels = param.size(1)
if (
hasattr(module, "groups")
and module.groups > 1
and (hasattr(module, "out_channels") and hasattr(module, "in_channels"))
):
module.groups = param.size(0) // param.size(1)
The provided code snippet includes necessary dependencies for implementing the `compress_strucure_pruned_module` function. Write a Python function `def compress_strucure_pruned_module( module: Module, param_group_dependency_map: Dict[str, List[str]], structure_type: str = "filter", optimizer: Optional[Optimizer] = None, strict: bool = True, )` to solve the following problem:
Removes in-place the given module parameters along either input or output channel dimensions for any of those channels that are completely pruned. Compresses parameters grouped according to the keys in the param_group_dependency_map and will update the opposite channels in the dependency map to remove those same channels :param module: module to compress structurally pruned parameters of :param param_group_dependency_map: mapping of comma separated parameter names that should be pruned together to a list of parameter names whose opposite channels should be updated based on which ones of the group are removed. i.e. {("param.1.name", "param.2.name"): ["param.1.dep.1.name", "other.dep", "other.dep.2"], ...} :param structure_type: type of pruning structure used to prune the model and generate the dependency map. Valid options are 'filter' and 'channel'. Default is 'filter' :param optimizer: optional optimizer object to update momentum buffer of for relevant parameters :param strict: if True, all parameters in a pruning group must be sparse along the same indices, will raise a ValueError if not. Default is True
Here is the function:
def compress_strucure_pruned_module(
    module: Module,
    param_group_dependency_map: Dict[str, List[str]],
    structure_type: str = "filter",
    optimizer: Optional[Optimizer] = None,
    strict: bool = True,
):
    """
    Removes in-place the given module parameters along either input or output channel
    dimensions for any of those channels that are completely pruned. Compresses
    parameters grouped according to the keys in the param_group_dependency_map
    and will update the opposite channels in the dependency map to remove those same
    channels

    :param module: module to compress structurally pruned parameters of
    :param param_group_dependency_map: mapping of comma separated parameter names that
        should be pruned together to a list of parameter names whose opposite channels
        should be updated based on which ones of the group are removed.
        i.e. {("param.1.name", "param.2.name"): ["param.1.dep.1.name", "other.dep",
        "other.dep.2"], ...}
    :param structure_type: type of pruning structure used to prune the model and
        generate the dependency map. Valid options are 'filter' and 'channel'.
        Default is 'filter'
    :param optimizer: optional optimizer object to update momentum buffer of for
        relevant parameters
    :param strict: if True, all parameters in a pruning group must be sparse along
        the same indices, will raise a ValueError if not. Default is True
    """
    if structure_type not in ["filter", "channel"]:
        raise ValueError(
            f"invalid structure_type {structure_type}. not in ['filter', 'channel']"
        )
    param_group_dependency_map = {
        tuple(param_group.split(",")): deps
        for param_group, deps in param_group_dependency_map.items()
    }
    named_parameters = dict(module.named_parameters())
    named_modules = dict(module.named_modules())
    param_name_to_module = {
        param_name: named_modules.get(_module_name_from_param_name(param_name))
        for param_name in named_parameters.keys()
    }
    prune_dim = 0 if structure_type == "filter" else 1  # filters stored as param 0
    for param_group, dependent_params in param_group_dependency_map.items():
        # get pruned channel idxs for each param in the group
        # then verify that they are all the same
        pruned_channel_idxs = None
        all_pruned_channel_idxs = []
        for param_name in param_group:
            if named_parameters[param_name].size(prune_dim) == 1:
                # DW Conv
                all_pruned_channel_idxs.append(None)
                continue
            pruned_idxs = _find_pruned_dims(named_parameters[param_name], prune_dim)
            all_pruned_channel_idxs.append(pruned_idxs)
            if pruned_channel_idxs is None:
                pruned_channel_idxs = pruned_idxs
                continue
            if pruned_idxs.size(0) < pruned_channel_idxs.size(0):
                # find the smallest valid pruned idx set
                pruned_idxs, pruned_channel_idxs = pruned_channel_idxs, pruned_idxs
            if pruned_channel_idxs.shape == pruned_idxs.shape and torch.all(
                pruned_channel_idxs == pruned_idxs
            ):
                continue
            elif pruned_idxs.size(0) % pruned_channel_idxs.size(0) != 0:
                raise ValueError(
                    "Incompatible size along pruning dimension for two parameters "
                    f"in the same pruning group: {pruned_idxs.size(prune_dim)} and "
                    f"{pruned_channel_idxs.size(prune_dim)}"
                )
            else:
                # check stride and equality
                stride = pruned_idxs.size(0) // pruned_channel_idxs.size(0)
                upscaled_pruned_channel_idxs = (
                    pruned_channel_idxs.reshape(-1, 1).expand(-1, stride).reshape(-1)
                )
                if strict and not torch.all(
                    upscaled_pruned_channel_idxs == pruned_idxs
                ):
                    raise ValueError(
                        "Parameters in the same pruning group have inconsistent "
                        "values pruned"
                    )
                if not upscaled_pruned_channel_idxs.numel() == pruned_idxs.numel():
                    raise ValueError(
                        "Parameters in the same pruning group have been pruned to "
                        "different structured sparsity levels"
                    )
                else:
                    continue
        if pruned_channel_idxs is None:
            _LOGGER.debug(
                f"Pruning group {param_group} found no valid pruning dimensions"
            )
            # BUGFIX: without this continue, `~pruned_channel_idxs` below would
            # raise a TypeError on None
            continue
        unpruned_channel_idxs = ~pruned_channel_idxs
        with torch.no_grad():
            # compress param group along pruned dimension
            for idx, param_name in enumerate(param_group):
                idxs_to_keep = (
                    unpruned_channel_idxs
                    if strict or all_pruned_channel_idxs[idx] is None
                    else ~all_pruned_channel_idxs[idx]
                )
                _compress_module_param_dim(
                    named_parameters[param_name],
                    target_dim=prune_dim,
                    idxs_to_keep=idxs_to_keep,
                    module=param_name_to_module[param_name],
                    optimizer=optimizer,
                )
            # compress dependent params along opposite dimension
            for dependent_param_name in dependent_params:
                if dependent_param_name not in named_parameters:
                    continue
                _compress_module_param_dim(
                    named_parameters[dependent_param_name],
                    target_dim=int(not prune_dim),  # 0 <-> 1
                    idxs_to_keep=unpruned_channel_idxs,
                    module=param_name_to_module[dependent_param_name],
                    optimizer=optimizer,
                )
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
21,210 | import logging
from sparseml.sparsification import SparsificationInfo
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `sparsification_info` function. Write a Python function `def sparsification_info() -> SparsificationInfo` to solve the following problem:
Load the available setup for sparsifying model within pytorch. :return: The sparsification info for the pytorch framework :rtype: SparsificationInfo
Here is the function:
def sparsification_info() -> SparsificationInfo:
    """
    Load the available setup for sparsifying model within pytorch.

    :return: The sparsification info for the pytorch framework
    :rtype: SparsificationInfo
    """
    _LOGGER.debug("getting sparsification info for pytorch")
    # TODO: fill in modifiers once available
    sparsification = SparsificationInfo(modifiers=[])
    _LOGGER.info("retrieved sparsification info for pytorch: %s", sparsification)
    return sparsification
21,211 | import logging
import math
import warnings
from itertools import cycle
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
import torch
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import (
PyTorchModifierYAML,
ScheduledModifier,
)
from sparseml.pytorch.sparsification.quantization.helpers import (
configure_module_bn_wrappers,
freeze_bn_stats,
fuse_module_conv_bn_relus,
)
from sparseml.pytorch.sparsification.quantization.legacy_modifier_quantization import (
QuantizationModifier as LegacyQuantizationModifier,
)
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationScheme,
QuantizationSchemeLoadable,
)
from sparseml.pytorch.sparsification.quantization.quantize import (
convert_module_qat_from_schemes,
raise_if_torch_quantization_not_available,
set_quantization_schemes,
)
from sparseml.pytorch.utils import BaseLogger, tensors_module_forward, tensors_to_device
from sparseml.sparsification import SparsificationTypes
class QuantizationModifier(ScheduledModifier):
"""
Enables quantization aware training (QAT) for a given module or its submodules
After the start epoch, the specified module(s) forward pass will emulate
quantized execution and the modifier will be enabled until training is completed.
| Sample yaml:
| !QuantizationModifier
| start_epoch: 0.0
| scheme:
| input_activations:
| num_bits: 8
| symmetric: False
| weights:
| num_bits: 8
| symmetric: True
| scheme_overrides:
| feature_extractor: "default"
| classifier:
| input_activations:
| num_bits: 8
| symmetric: False
| weights: null
| Conv2d:
| input_activations:
| num_bits: 8
| symmetric: True
| ignore: ["ReLU", "input"]
| disable_quantization_observer_epoch: 2.0
| freeze_bn_stats_epoch: 3.0
| model_fuse_fn_name: 'fuse_module'
| strict: True
:param start_epoch: The epoch to start the modifier at
:param scheme: Default QuantizationScheme to use when enabling quantization
in a module. May also be a dictionary to be loaded into the QuantizationScheme
class. A string alias may also be used, supported aliases:
['default', 'deepsparse', 'tensorrt'].
If None, the default scheme (`QuantizationScheme()`) will be used.
Default is None
:param scheme_overrides: optional mapping of module type names or submodule type
names to quantization schemes to override them with. If a scheme is mapped to
'default', then it will use the scheme set in the modifier scheme property
:param ignore: optional list of module class names or submodule names
to not quantize. Default is None
:param disable_quantization_observer_epoch: Epoch to disable updates to the module
quantization observers. At this point, quantized weights and zero points will
not be updated. Leave None to not disable observers during QAT. Default is None
:param freeze_bn_stats_epoch: Epoch to stop the tracking of batch norm stats. Leave
None to not stop tracking batch norm stats during QAT. Default is None
:param model_fuse_fn_name: Name of model function to fuse the model in place prior
to performing QAT. Set as None or 'no_fuse' to skip module fusing. Set as
'conv_bv_relus' to use `sparseml.pytorch.utils.fuse_module_conv_bn_relus`.
Default is None
:param model_fuse_fn_kwargs: dictionary of keyword argument values to be passed
to the model fusing function
:param num_calibration_steps: Number of steps to run post training calibration for.
When None, the entire calibration_dataloader is used
:param strict: if True, will raise an error if any module types or submodules in
scheme_overrides or ignore are not found in a given module. Default True
:param end_epoch: Disabled, setting to anything other than -1 will raise an
exception. For compatibility with YAML serialization only.
"""
def __init__(
self,
start_epoch: float = -1.0,
scheme: QuantizationSchemeLoadable = None,
scheme_overrides: Optional[Dict[str, QuantizationSchemeLoadable]] = None,
ignore: Optional[List[str]] = None,
disable_quantization_observer_epoch: Optional[float] = None,
freeze_bn_stats_epoch: Optional[float] = None,
model_fuse_fn_name: Optional[str] = None,
model_fuse_fn_kwargs: Optional[Dict[str, Any]] = None,
num_calibration_steps: Optional[int] = None,
strict: bool = True,
end_epoch: float = -1.0,
):
raise_if_torch_quantization_not_available()
if end_epoch != -1:
raise ValueError(
"end_epoch is disabled for QuantizationModifier and can only be set to"
" -1. Given {}".format(end_epoch)
)
super().__init__(start_epoch=start_epoch, end_epoch=-1.0, end_comparator=-1)
self._scheme = QuantizationScheme.load(scheme)
self._scheme_overrides = _load_quantization_schemes_dict(
scheme_overrides, self._scheme
)
self._ignore = ignore or []
self._disable_quantization_observer_epoch = disable_quantization_observer_epoch
self._freeze_bn_stats_epoch = freeze_bn_stats_epoch
self._num_calibration_steps = num_calibration_steps
self._calibration_dataloader = None
self._calibration_function = None
self._model_fuse_fn_name = model_fuse_fn_name
self._model_fuse_fn_kwargs = model_fuse_fn_kwargs or {}
if (
isinstance(self._model_fuse_fn_name, str)
and self._model_fuse_fn_name.lower() == "none"
):
self._model_fuse_fn_name = None
self._strict = strict
self._qat_enabled = False
self._quantization_observer_disabled = False
self._bn_stats_frozen = False
self._validate_params()
def sparsification_types(self) -> List[SparsificationTypes]:
"""
:return: the sparsification types this modifier instance will apply
"""
return [SparsificationTypes.quantization, SparsificationTypes.structured]
def scheme(self) -> QuantizationSchemeLoadable:
"""
:return: Default QuantizationScheme to use when enabling quantization
in a module. returned as a dictionary for serialization purposes
"""
return self._scheme
def scheme(self, value: QuantizationSchemeLoadable):
"""
:params value: Default QuantizationScheme to use when enabling quantization
in a module. May also be a dictionary to be loaded into the
QuantizationScheme class. If None, the default scheme
(`QuantizationScheme()`) will be used
"""
self._scheme = QuantizationScheme.load(value)
def scheme_overrides(self) -> Optional[Dict[str, QuantizationSchemeLoadable]]:
"""
:return: optional mapping of module type names or submodule type
names to quantization schemes to override them with. If a scheme is mapped
to 'default', then it will use the scheme set in the modifier scheme
property
"""
return self._scheme_overrides
def scheme_overrides(self, value: Optional[Dict[str, QuantizationSchemeLoadable]]):
"""
:params value: optional mapping of module type names or submodule type
names to quantization schemes to override them with. If a scheme is mapped
to 'default', then it will use the scheme set in the modifier scheme
property
"""
self._scheme_overrides = _load_quantization_schemes_dict(value, self._scheme)
def ignore(self) -> List[str]:
"""
:return: optional list of module class names or submodule names to not propagate
quantization schemes to
"""
return self._ignore
def ignore(self, value: Optional[List[str]]):
"""
:params value: optional list of module class names or submodule names
to not propagate quantization schemes to
"""
self._ignore = value or []
def disable_quantization_observer_epoch(self) -> Optional[float]:
"""
:return: Epoch to disable updates to the module
quantization observers. At this point, quantized weights and zero points
will not be updated. When None, observers never disabled during QAT
"""
return self._disable_quantization_observer_epoch
def disable_quantization_observer_epoch(self, value: Optional[float]):
"""
:params value: Epoch to disable updates to the module
quantization observers. At this point, quantized weights and zero points
will not be updated. Set None to not disable observers during QAT
"""
self._disable_quantization_observer_epoch = value
self._validate_params()
def freeze_bn_stats_epoch(self) -> Optional[float]:
"""
:return: Epoch to stop the tracking of batch norm stats. When
None, batch norm stats are track for all of training
"""
return self._freeze_bn_stats_epoch
def freeze_bn_stats_epoch(self, value: Optional[float]):
"""
:params value: Epoch to stop the tracking of batch norm stats. Set
None to not stop tracking batch norm stats during QAT
"""
self._freeze_bn_stats_epoch = value
self._validate_params()
def num_calibration_steps(self) -> Optional[int]:
    """
    :return: number of post-training calibration steps to run; None means
        the full calibration_dataloader is consumed
    """
    return self._num_calibration_steps
def num_calibration_steps(self, value: Optional[int]):
    """
    :params value: number of post-training calibration steps to run;
        None consumes the entire calibration_dataloader
    """
    self._num_calibration_steps = value
def model_fuse_fn_name(self) -> Optional[str]:
    """
    :return: name of the function used to fuse the model in place prior to
        QAT; None selects the default
        (`sparseml.pytorch.utils.fuse_module_conv_bn_relus`, or 'no_fuse'
        when targeting tensorrt)
    """
    return self._model_fuse_fn_name
def model_fuse_fn_name(self, value: Optional[str]):
    """
    :params value: name of the model fuse function to run prior to QAT;
        the string 'none' (any case) is normalized to None, and 'no_fuse'
        skips module fusing entirely
    """
    if isinstance(value, str) and value.lower() == "none":
        value = None
    self._model_fuse_fn_name = value
    self._validate_params()
def model_fuse_fn_kwargs(self) -> Dict[str, Any]:
    """
    :return: keyword arguments that are forwarded to the model fuse function
    """
    return self._model_fuse_fn_kwargs
def strict(self) -> bool:
    """
    :return: True if unmatched entries in scheme_overrides or ignore should
        raise an error when applied to the module
    """
    return self._strict
def strict(self, value: bool):
    """
    :params value: True to raise an error when any entry in scheme_overrides
        or ignore is not found in the given module
    """
    self._strict = value
def initialize(
    self,
    module: Module,
    epoch: float = 0,
    loggers: Optional[List[BaseLogger]] = None,
    calibration_dataloader: Optional[Iterable[Tuple[List, Dict[str, Any]]]] = None,
    calibration_function: Optional[Callable] = None,
    **kwargs,
):
    """
    Grab the module / submodule to perform QAT on

    :param module: the PyTorch model/module to modify
    :param epoch: The epoch to initialize the modifier and module at.
        Defaults to 0 (start of the training process)
    :param loggers: Optional list of loggers to log the modification process to
    :param calibration_dataloader: optional dataloader for running post training
        quantization with the given model. if present, calibration will be run
        immediately after quantization is enabled
    :param calibration_function: An Optional callable to use for
        calibration of module parameters post training. Should be able to
        accept a batch of inputs along with a module.
        Example: func(batch, module), Defaults to tensors_module_forward
    :param kwargs: Optional kwargs to support specific arguments
        for individual modifiers.
    """
    super().initialize(module, epoch, loggers, **kwargs)
    # stash calibration inputs for _calibrate_if_possible once QAT is enabled
    self._calibration_dataloader = calibration_dataloader
    self._calibration_function = calibration_function
    # apply quantization immediately if the start epoch has already passed
    self._check_quantization_update(module, epoch, steps_per_epoch=0)
def update(
    self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
    """
    If start_pending(), fuses the model, sets the model quantization config,
    calls torch.quantization.prepare_qat on the model to begin QAT
    If end_pending(), updates the modules layers params to their original
    trainable state.

    :param module: module to modify
    :param optimizer: optimizer to modify
    :param epoch: current epoch and progress within the current epoch
    :param steps_per_epoch: number of steps taken within each epoch
        (calculate batch number using this and epoch)
    """
    super().update(module, optimizer, epoch, steps_per_epoch)
    # all enable / observer-disable / BN-freeze decisions live in one helper
    self._check_quantization_update(module, epoch, steps_per_epoch)
def update_ready(self, epoch: float, steps_per_epoch: int) -> bool:
    """
    :param epoch: current epoch and progress within the current epoch
    :param steps_per_epoch: number of steps taken within each epoch
        (calculate batch number using this and epoch)
    :return: True if update() should be called for this modifier
    :raises RuntimeError: if the modifier was never initialized
    """
    if not self._initialized:
        raise RuntimeError("modifier must be initialized first")
    if not self._enabled:
        return False
    # ready when QAT should start, or BN freeze / observer disable is due
    return bool(
        self.start_pending(epoch, steps_per_epoch)
        or self._freeze_bn_stats_update_ready(epoch)
        or self._disable_quantization_observer_update_ready(epoch)
    )
def advance_epochs(self, ref_start_epoch: float = None):
    """
    Advance epoch attributes given a reference start epoch

    :param ref_start_epoch: the reference, i.e. new, start epoch
    """
    if ref_start_epoch is None:
        return
    super().advance_epochs(ref_start_epoch=ref_start_epoch)
    # shift observer-disable epoch by the new start; negative values clamp to 0
    if self._disable_quantization_observer_epoch is not None:
        self._disable_quantization_observer_epoch = (
            max(0.0, self._disable_quantization_observer_epoch) + ref_start_epoch
        )
    # shift the BN-stats freeze epoch the same way
    if self._freeze_bn_stats_epoch is not None:
        self._freeze_bn_stats_epoch = (
            max(0.0, self._freeze_bn_stats_epoch) + ref_start_epoch
        )
    self._validate_params()
def _check_quantization_update(
    self, module: Module, epoch: float, steps_per_epoch: int
):
    # Apply any pending quantization state changes for the given epoch:
    # enable QAT exactly once, then independently disable observers and
    # freeze batch norm stats when their configured epochs are reached.
    if self.start_pending(epoch, steps_per_epoch) and not self._qat_enabled:
        self._enable_module_qat(module)
    if self._disable_quantization_observer_update_ready(epoch):
        module.apply(torch.quantization.disable_observer)
        self._quantization_observer_disabled = True
    if self._freeze_bn_stats_update_ready(epoch):
        module.apply(freeze_bn_stats)
        self._bn_stats_frozen = True
    # log quantization state on every update call
    self._log_quantization(module, epoch, steps_per_epoch)
def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:
return (
self._disable_quantization_observer_epoch is not None
and epoch >= self._disable_quantization_observer_epoch
and not self._quantization_observer_disabled
)
def _freeze_bn_stats_update_ready(self, epoch: float) -> bool:
return (
self._freeze_bn_stats_epoch is not None
and epoch >= self._freeze_bn_stats_epoch
and not self._bn_stats_frozen
)
def _enable_module_qat(self, module: Module):
    """Fuse, attach schemes, convert target layers to QAT, then calibrate."""
    # fuse conv-bn-relu blocks prior to quantization emulation
    self._fuse(module)
    # add quantization_schemes to target submodules
    set_quantization_schemes(
        module,
        scheme=self._scheme,
        scheme_overrides=self._scheme_overrides,
        ignore=self._ignore,
        strict=self._strict,
    )
    # fix for freezing batchnorm statistics when not fusing BN with convs.
    # pytorch only supports freezing batchnorm statistics for fused modules.
    # this fix wraps BN modules adding with a new module class that supports
    # methods related to freezing/unfreezing BN statistics.
    configure_module_bn_wrappers(module)
    # convert target qconfig layers to QAT modules with FakeQuantize
    convert_module_qat_from_schemes(module)
    # mark enabled before calibration so _calibrate_if_possible can proceed
    self._qat_enabled = True
    self._calibrate_if_possible(module)
def _fuse(self, module: Module):
if self.model_fuse_fn_name in [None, "conv_bn_relus"]:
self._model_fuse_fn_kwargs["inplace"] = True
fuse_module_conv_bn_relus(module, **self._model_fuse_fn_kwargs)
elif self.model_fuse_fn_name != "no_fuse":
module_fuse_fn = getattr(module, self._model_fuse_fn_name, None)
if module_fuse_fn is None or not callable(module_fuse_fn):
raise ValueError(
"Invalid model_fuse_fn_name. "
"Module has no callable function {}".format(
self._model_fuse_fn_name
)
)
module_fuse_fn(**self._model_fuse_fn_kwargs)
def _calibrate_if_possible(self, module: Module):
if self.num_calibration_steps == 0 and self._calibration_dataloader:
warnings.warn(
f"num_calibration_steps is {self.num_calibration_steps}."
f"Calibration data loader will not be used."
)
elif self.num_calibration_steps and not self._calibration_dataloader:
raise ValueError(
f"num_calibration_steps is {self.num_calibration_steps}. "
"Calibration data loader is not set. Pass a "
"calibration_data_loader with initialize(...) method."
)
elif not self._calibration_dataloader or not self._qat_enabled:
return
elif self._calibration_dataloader:
self._calibrate(module)
def _calibrate(self, module: Module):
    """Forward calibration batches through module to tune quantization observers."""
    _LOGGER.info("Running quantization calibration using calibration_dataloader")
    # remember training mode so it can be restored after calibration
    module_training = module.training
    module.eval()
    forward_fn: Callable = (
        self._calibration_function
        if self._calibration_function
        else tensors_module_forward
    )
    model_device = next(module.parameters()).device
    # cycle() lets num_calibration_steps exceed the dataloader's length
    _dataloader = (
        self._calibration_dataloader
        if self.num_calibration_steps is None
        else cycle(self._calibration_dataloader)
    )
    for batch_idx, batch in enumerate(_dataloader):
        if self.num_calibration_steps and batch_idx >= self.num_calibration_steps:
            break
        batch = tensors_to_device(batch, model_device)
        with torch.no_grad():
            forward_fn(batch, module=module)
    if module_training:
        module.train()
def _validate_params(self):
self.validate_schedule()
if (
self._disable_quantization_observer_epoch is not None
and self._disable_quantization_observer_epoch < self._start_epoch
):
raise ValueError(
f"disable_quantization_observer_epoch may not be greater than "
f"start_epoch for QuantizationModifier, received: "
f"{self._disable_quantization_observer_epoch} with start_epoch "
f"{self._start_epoch}"
)
if (
self._freeze_bn_stats_epoch is not None
and self._freeze_bn_stats_epoch < self._start_epoch
):
raise ValueError(
"freeze_bn_stats_epoch may not be greater than start_epoch"
" for QuantizationModifier, received: {} with start_epoch {}".format(
self._freeze_bn_stats_epoch, self._start_epoch
)
)
all_schemes = [self._scheme] + list(self._scheme_overrides.values())
if any(scheme.target_hardware == "tensorrt" for scheme in all_schemes) and (
self._model_fuse_fn_name != "no_fuse"
):
_LOGGER.info(
"QuantizationModifier - target hardware tensorrt detected - "
"Disabling model fuse step"
)
self._model_fuse_fn_name = "no_fuse"
def _log_quantization(
self,
module: Module,
epoch: float,
steps_per_epoch: int,
):
"""
Check whether to log an update for the learning rate of the modifier.
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
def _log(tag, value):
self.log_scalar(
tag=tag,
value=value,
epoch=epoch,
steps_per_epoch=steps_per_epoch,
)
# log layer-wise quantization info
num_fake_quantizes = 0
for name, submodule in module.named_modules():
if not isinstance(submodule, torch.quantization.FakeQuantize):
continue
num_fake_quantizes += 1
qrange = submodule.quant_max - submodule.quant_min + 1
num_bits = int(math.log2(qrange))
_log(
tag=f"QuantizationModifier/{name}/num_bits",
value=num_bits,
)
# log global quantization info
_log(
tag="QuantizationModifier/num_fake_quantize_global",
value=num_fake_quantizes,
)
_log(
tag="QuantizationModifier/bn_stats_frozen",
value=1.0 if self._bn_stats_frozen else 0.0,
)
_log(
tag="QuantizationModifier/qat_observers_disabled",
value=1.0 if self._quantization_observer_disabled else 0.0,
)
class QuantizationModifier(ScheduledModifier):
"""
Enables quantization aware training (QAT) for a given module or its submodules
After the start epoch, the specified module(s)' forward pass will emulate
quantized execution and the modifier will be enabled until training is completed.
| Sample yaml:
| !QuantizationModifier
| start_epoch: 0.0
| submodules: ['blocks.0', 'blocks.2']
| model_fuse_fn_name: 'fuse_module'
| disable_quantization_observer_epoch: 2.0
| freeze_bn_stats_epoch: 3.0
| reduce_range: False
| activation_bits: 8
:param start_epoch: The epoch to start the modifier at
:param submodules: List of submodule names to perform QAT on. Leave None to quantize
entire model. Default is None
:param model_fuse_fn_name: Name of model function to fuse the model in place prior
to performing QAT. Set as None or 'no_fuse' to skip module fusing. Set as
'conv_bn_relus' to use `sparseml.pytorch.utils.fuse_module_conv_bn_relus`.
Default is None
:param disable_quantization_observer_epoch: Epoch to disable updates to the module's
quantization observers. After this point, quantized weights and zero points will
not be updated. Leave None to not disable observers during QAT. Default is None
:param freeze_bn_stats_epoch: Epoch to stop the tracking of batch norm stats. Leave
None to not stop tracking batch norm stats during QAT. Default is None
:param end_epoch: Disabled, setting to anything other than -1 will raise an
exception. For compatibility with YAML serialization only.
:param model_fuse_fn_kwargs: dictionary of keyword argument values to be passed
to the model fusing function
:param quantize_embeddings: if True, will perform QAT on torch.nn.Embedding layers
using sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake
quantize embedding weights. Default is True. Models without embedding layers
will be unaffected
:param reduce_range: if True, the quantization range will be reduced by one bit.
This may prevent overflow issues with model execution on certain hardware
Default is False
:param quantize_linear_activations: if True, FakeQuantize ops will be run
for output activations of fully connected layers. Default is True.
:param quantize_conv_activations: if True, FakeQuantize ops will be run
for output activations of convolutional layers. Default is True.
:param quantize_embedding_activations: if True, FakeQuantize ops will be run
for output activations of embedding layers. Default is True.
:param activation_bits: Number of bits to use for setting quant min/max values for
activations. Default 8.
:param weight_bits: Number of bits to use for setting quant min/max values for
weights. Default is 8.
:param num_calibration_steps: Number of steps to run post training calibration for.
When None, the entire calibration_dataloader is used
:param exclude_batchnorm: If True, do not propagate quantization qconfigs to
batch-normalization modules
:param exclude_module_types: optional list of module class names
to not propagate quantization configs to. Default is None
:param custom_quantizable_module_types: optional list of module class names
to be added to the list of quantizable modules. Default is None
:param activation_qconfig_kwargs: Additional kwargs for quantization of
activations.
:param weight_qconfig_kwargs: Additional kwargs for quantization of
weights.
:param tensorrt: if True sets quantization configuration for compatibility with
explicit quantization as supported by TensorRT 8.2.
"""
def __init__(
    self,
    start_epoch: float = -1.0,
    submodules: Union[List[str], None] = None,
    model_fuse_fn_name: Union[str, None] = None,
    disable_quantization_observer_epoch: Union[float, None] = None,
    freeze_bn_stats_epoch: Union[float, None] = None,
    end_epoch: float = -1,
    model_fuse_fn_kwargs: Dict[str, Any] = None,
    quantize_embeddings: bool = True,
    reduce_range: bool = False,
    quantize_linear_activations: bool = True,
    quantize_conv_activations: bool = True,
    quantize_embedding_activations: bool = True,
    activation_bits: int = 8,
    weight_bits: int = 8,
    num_calibration_steps: Optional[int] = None,
    exclude_batchnorm: bool = True,
    exclude_module_types: Optional[List[str]] = None,
    custom_quantizable_module_types: Optional[List[str]] = None,
    activation_qconfig_kwargs: Optional[Dict[str, Any]] = None,
    weight_qconfig_kwargs: Optional[Dict[str, Any]] = None,
    tensorrt: bool = False,
):
    """
    See the class docstring for documentation of each parameter.

    :raises RuntimeError: if torch quantization support cannot be imported
    :raises ValueError: if end_epoch is set to anything other than -1
    """
    if torch_quantization is None or torch_intrinsic is None:
        raise RuntimeError(
            "Unable to import package torch.quantization and/or "
            "torch.nn.intrinsic. "
            "Try upgrading your PyTorch version to use the QuantizationModifier."
        )
    if end_epoch != -1:
        raise ValueError(
            "end_epoch is disabled for QuantizationModifier and can only be set to"
            " -1. Given {}".format(end_epoch)
        )
    super().__init__(start_epoch=start_epoch, end_epoch=-1.0, end_comparator=-1)
    self._start_epoch = start_epoch
    self._submodules = submodules
    self._model_fuse_fn_name = model_fuse_fn_name
    self._model_fuse_fn_kwargs = model_fuse_fn_kwargs or {}
    self._disable_quantization_observer_epoch = disable_quantization_observer_epoch
    self._freeze_bn_stats_epoch = freeze_bn_stats_epoch
    self._quantize_embeddings = quantize_embeddings
    self._reduce_range = reduce_range
    self._quantize_linear_activations = quantize_linear_activations
    self._quantize_conv_activations = quantize_conv_activations
    self._quantize_embedding_activations = quantize_embedding_activations
    self._activation_bits = activation_bits
    self._weight_bits = weight_bits
    self._exclude_batchnorm = exclude_batchnorm
    self._exclude_module_types = exclude_module_types
    self._custom_quantizable_module_types = custom_quantizable_module_types
    # runtime state populated by initialize() / update()
    self._modules_to_quantize = None
    self._qat_enabled = False
    self._quantization_observer_disabled = False
    self._bn_stats_frozen = False
    self._activation_qconfig_kwargs = activation_qconfig_kwargs
    self._weight_qconfig_kwargs = weight_qconfig_kwargs
    self._tensorrt = tensorrt
    self._calibration_dataloader = None
    self._calibration_function = None
    self._num_calibration_steps = num_calibration_steps
    # the literal string 'none' (any case) is treated the same as None
    if (
        isinstance(self._model_fuse_fn_name, str)
        and self._model_fuse_fn_name.lower() == "none"
    ):
        self._model_fuse_fn_name = None
    # store submodule names as a set for O(1) membership checks
    if isinstance(self._submodules, list):
        self._submodules = set(self._submodules)
    self._validate_params()
def sparsification_types(self) -> List[SparsificationTypes]:
    """
    :return: the sparsification types applied by this modifier instance
    """
    return [
        SparsificationTypes.quantization,
        SparsificationTypes.structured,
    ]
def submodules(self) -> Union[List[str], None]:
    """
    :return: names of the submodules targeted for QAT, or None when the
        entire model is quantized
    """
    if self._submodules is None:
        return None
    return list(self._submodules)
def submodules(self, value: Union[List[str], None]):
    """
    :params value: names of submodules to run QAT on; None targets the
        entire model
    """
    # lists are stored as sets for O(1) membership checks during initialize
    self._submodules = set(value) if isinstance(value, list) else value
    self._validate_params()
def model_fuse_fn_name(self) -> Union[str, None]:
    """
    :return: name of the function used to fuse the model in place prior to
        QAT; when unset, defaults to 'conv_bn_relus', or 'no_fuse' when the
        tensorrt flag is True
    """
    if self.tensorrt:
        _LOGGER.info(
            "Overriding model_fuse_fn_name to False because tensorrt flag is True."
        )
        return self._model_fuse_fn_name or "no_fuse"
    return self._model_fuse_fn_name or "conv_bn_relus"
def model_fuse_fn_name(self, value: Union[str, None]):
    """
    :params value: name of the model fuse function to run prior to QAT;
        the string 'none' (any case) is normalized to None, and 'no_fuse'
        skips module fusing entirely
    """
    if isinstance(value, str) and value.lower() == "none":
        value = None
    self._model_fuse_fn_name = value
    self._validate_params()
def disable_quantization_observer_epoch(self) -> Union[float, None]:
    """
    :return: epoch after which quantization observers stop updating,
        freezing quantized weights and zero points; None means observers
        are never disabled during QAT
    """
    return self._disable_quantization_observer_epoch
def disable_quantization_observer_epoch(self, value: Union[float, None]):
    """
    :params value: epoch after which to stop updating quantization
        observers; set None to keep observers enabled during QAT
    """
    self._disable_quantization_observer_epoch = value
    self._validate_params()
def freeze_bn_stats_epoch(self) -> Union[float, None]:
    """
    :return: epoch at which batch norm statistics stop being tracked;
        None means stats are tracked for all of training
    """
    return self._freeze_bn_stats_epoch
def freeze_bn_stats_epoch(self, value: Union[float, None]):
    """
    :params value: epoch at which to stop tracking batch norm stats;
        set None to never stop tracking during QAT
    """
    self._freeze_bn_stats_epoch = value
    self._validate_params()
def quantize_embeddings(self) -> bool:
    """
    :return: True if torch.nn.Embedding layers undergo QAT via
        sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake
        quantize their weights
    """
    return self._quantize_embeddings
def quantize_embeddings(self, value: bool):
    """
    :params value: True to perform QAT on torch.nn.Embedding layers via
        sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake
        quantize their weights
    """
    self._quantize_embeddings = value
def reduce_range(self) -> bool:
    """
    :return: True if the quantization range is reduced by one bit, which
        may prevent overflow issues on certain hardware
    """
    return self._reduce_range
def quantize_linear_activations(self) -> bool:
    """
    :return: True if output activations of fully connected layers are fake
        quantized; always False when the tensorrt flag is True
    """
    if not self.tensorrt:
        return self._quantize_linear_activations
    _LOGGER.info(
        "Overriding quantize_linear_activations to False "
        "because tensorrt flag is True."
    )
    return False
def quantize_conv_activations(self) -> bool:
    """
    :return: True if output activations of convolutional layers are fake
        quantized; always False when the tensorrt flag is True
    """
    if not self.tensorrt:
        return self._quantize_conv_activations
    _LOGGER.info(
        "Overriding quantize_conv_activations to False "
        "because tensorrt flag is True."
    )
    return False
def quantize_embedding_activations(self) -> bool:
    """
    :return: if True, FakeQuantize ops will be run for output activations
        of embedding layers
    """
    # docstring fixed: this governs embedding layers, not convolutional ones
    if self.tensorrt:
        _LOGGER.info(
            "Overriding quantize_embedding_activations to False "
            "because tensorrt flag is True."
        )
        return False
    else:
        return self._quantize_embedding_activations
def custom_quantizable_module_types(self) -> Union[List[str], None]:
    """
    :return: optional module class names added to the set of quantizable
        modules; None when no extra types are registered
    """
    return self._custom_quantizable_module_types
def exclude_module_types(self) -> Union[List[str], None]:
    """
    :return: optional module class names that quantization configs are not
        propagated to; None when nothing is excluded
    """
    return self._exclude_module_types
def exclude_batchnorm(self) -> bool:
    """
    :return: True if quantization qconfigs are not propagated to
        batch-normalization modules
    """
    return self._exclude_batchnorm
def activation_bits(self) -> Optional[int]:
    """
    :return: number of bits used for activation quant min/max values;
        the default of 8 quantizes activations to 8 bits
    """
    return self._activation_bits
def weight_bits(self) -> Optional[int]:
    """
    :return: number of bits used for weight quant min/max values;
        the default of 8 quantizes weights to 8 bits
    """
    return self._weight_bits
def activation_qconfig_kwargs(self) -> Dict[str, Any]:
    """
    :return: extra kwargs (quant_min, quant_max, dtype, ...) applied to the
        activation qconfig
    """
    return self._activation_qconfig_kwargs
def weight_qconfig_kwargs(self) -> Dict[str, Any]:
    """
    :return: extra kwargs (quant_min, quant_max, dtype, ...) applied to the
        weight qconfig; a string 'minmaxobserver' entry under 'observer' is
        resolved to the torch MinMaxObserver class in a copied dict
    """
    kwargs = self._weight_qconfig_kwargs
    if kwargs is None or "observer" not in kwargs:
        return kwargs
    # copy so the stored kwargs keep their serializable string form
    resolved = kwargs.copy()
    if resolved["observer"] == "minmaxobserver":
        resolved["observer"] = torch_quantization.MinMaxObserver
    return resolved
def num_calibration_steps(self) -> Optional[int]:
    """
    :return: number of post-training calibration steps to run; None means
        the entire calibration_dataloader is consumed
    """
    return self._num_calibration_steps
def tensorrt(self) -> bool:
    """
    :return: True when quantization configs are overridden for
        compatibility with TensorRT
    """
    return self._tensorrt
def initialize(
    self,
    module: Module,
    epoch: float = 0,
    loggers: Optional[List[BaseLogger]] = None,
    calibration_dataloader: Optional[Iterable[Tuple[List, Dict[str, Any]]]] = None,
    calibration_function: Optional[Callable] = None,
    **kwargs,
):
    """
    Grab the module / submodule to perform QAT on

    :param module: the PyTorch model/module to modify
    :param epoch: The epoch to initialize the modifier and module at.
        Defaults to 0 (start of the training process)
    :param loggers: Optional list of loggers to log the modification process to
    :param calibration_dataloader: optional dataloader for running post training
        quantization with the given model. if present, calibration will be run
        immediately after quantization is enabled
    :param calibration_function: An Optional callable to use for
        calibration of module parameters post training. Should be able to
        accept a batch of inputs along with a module.
        Example: func(batch, module), Defaults to tensors_module_forward
    :param kwargs: Optional kwargs to support specific arguments
        for individual modifiers.
    :raises RuntimeError: if any requested submodule name is not found
    """
    super().initialize(module, epoch, loggers, **kwargs)
    self._modules_to_quantize = []
    self._calibration_dataloader = calibration_dataloader
    self._calibration_function = calibration_function
    if self._submodules is not None:
        found_submodules = []
        for name, submodule in module.named_modules():
            # strip the DataParallel/DistributedDataParallel "module." prefix
            if name.startswith("module."):
                name = name[7:]
            if name in self._submodules:
                self._modules_to_quantize.append(_ModuleToQuantize(name, submodule))
                found_submodules.append(name)
        if not len(found_submodules) == len(self._submodules):
            # message fixed: the two literals previously concatenated as
            # "...quantizegiven:" with no separating whitespace
            raise RuntimeError(
                "Could not find all provided submodules to quantize. "
                "given: {}, found: {}".format(
                    list(self._submodules), found_submodules
                )
            )
    else:
        # no submodules specified - quantize the entire model
        self._modules_to_quantize.append(_ModuleToQuantize(None, module))
    self._check_quantization_update(module, epoch, steps_per_epoch=0)
def finalize(
    self, module: Optional[Module] = None, reset_loggers: bool = True, **kwargs
):
    """
    Cleans up any state

    :param module: The model/module to finalize the modifier for.
        Marked optional so state can still be cleaned up on delete,
        but generally should always be passed in.
    :param reset_loggers: True to remove any currently attached loggers (default),
        False to keep the loggers attached.
    :param kwargs: Optional kwargs to support specific arguments
        for individual modifiers.
    """
    super().finalize(module, reset_loggers, **kwargs)
    # drop references to tracked submodules so they can be garbage collected
    self._modules_to_quantize = None
def update(
    self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
    """
    If start_pending(), fuses the model, sets the model quantization config,
    calls torch.quantization.prepare_qat on the model to begin QAT
    If end_pending(), updates the modules layers params to their original
    trainable state.

    :param module: module to modify
    :param optimizer: optimizer to modify
    :param epoch: current epoch and progress within the current epoch
    :param steps_per_epoch: number of steps taken within each epoch
        (calculate batch number using this and epoch)
    """
    super().update(module, optimizer, epoch, steps_per_epoch)
    # all enable / observer-disable / BN-freeze decisions live in one helper
    self._check_quantization_update(module, epoch, steps_per_epoch)
def update_ready(self, epoch: float, steps_per_epoch: int) -> bool:
    """
    :param epoch: current epoch and progress within the current epoch
    :param steps_per_epoch: number of steps taken within each epoch
        (calculate batch number using this and epoch)
    :return: True if the modifier is pending an update and update() should be called
    :raises RuntimeError: if called before initialize()
    """
    if not self._initialized:
        raise RuntimeError("modifier must be initialized first")
    if not self._enabled:
        return False
    # ready when QAT should start, or observer disable / BN freeze is due
    return bool(
        self.start_pending(epoch, steps_per_epoch)
        or self._disable_quantization_observer_update_ready(epoch)
        or self._freeze_bn_stats_update_ready(epoch)
    )
def advance_epochs(self, ref_start_epoch: float = None):
    """
    Advance epoch attributes given a reference start epoch

    :param ref_start_epoch: the reference, i.e. new, start epoch
    """
    if ref_start_epoch is None:
        return
    super().advance_epochs(ref_start_epoch=ref_start_epoch)
    # shift observer-disable epoch by the new start; negative values clamp to 0
    if self._disable_quantization_observer_epoch is not None:
        self._disable_quantization_observer_epoch = (
            max(0.0, self._disable_quantization_observer_epoch) + ref_start_epoch
        )
    # shift the BN-stats freeze epoch the same way
    if self._freeze_bn_stats_epoch is not None:
        self._freeze_bn_stats_epoch = (
            max(0.0, self._freeze_bn_stats_epoch) + ref_start_epoch
        )
    self._validate_params()
def _check_quantization_update(
    self, module: Module, epoch: float, steps_per_epoch: int
):
    # Apply pending quantization state changes for the given epoch: enable
    # QAT exactly once, then disable observers / freeze BN stats on each
    # tracked submodule when their respective epochs are reached.
    if self.start_pending(epoch, steps_per_epoch) and not self._qat_enabled:
        self._enable_module_qat(module)
    if self._disable_quantization_observer_update_ready(epoch):
        for _, quant_module in self._modules_to_quantize:
            quant_module.apply(torch_quantization.disable_observer)
        self._quantization_observer_disabled = True
    if self._freeze_bn_stats_update_ready(epoch):
        for _, quant_module in self._modules_to_quantize:
            quant_module.apply(freeze_bn_stats)
        self._bn_stats_frozen = True
def _enable_module_qat(self, module: Module):
    """Fuse, configure qconfigs, convert tracked modules to QAT, and calibrate."""
    # fuse module Conv-BNs
    if self.model_fuse_fn_name == "conv_bn_relus":
        self._model_fuse_fn_kwargs["inplace"] = True
        fuse_module_conv_bn_relus(module, **self._model_fuse_fn_kwargs)
    elif self.model_fuse_fn_name != "no_fuse":
        module_fuse_fn = getattr(module, self._model_fuse_fn_name, None)
        if module_fuse_fn is None or not callable(module_fuse_fn):
            raise ValueError(
                "Invalid model_fuse_fn_name. "
                "Module has no callable function {}".format(
                    self._model_fuse_fn_name
                )
            )
        module_fuse_fn(**self._model_fuse_fn_kwargs)
    # build list of layer types that should not quantize output activations
    remove_activation_qat_layers = ["FloatFunctional"]
    if not self.quantize_linear_activations:
        remove_activation_qat_layers.extend(LINEAR_ACTIVATION_NAMES)
    if not self.quantize_conv_activations:
        remove_activation_qat_layers.extend(CONV_ACTIVATION_NAMES)
    if not self.quantize_embedding_activations:
        remove_activation_qat_layers.append("Embedding")
    # fix for freezing batchnorm statistics when not fusing BN with convs.
    # pytorch only supports freezing batchnorm statistics for fused modules.
    # this fix wraps BN modules adding with a new module class that supports
    # methods related to freezing/unfreezing BN statistics.
    configure_module_bn_wrappers(module)
    # set qconfig.
    # if tensorrt flag is used, set activation and weights to symmetric
    # quantization.
    # otherwise, use the default values set in QConfigProperties
    qproperties = QConfigProperties(
        activation_bits=self.activation_bits,
        weight_bits=self.weight_bits,
        activation_qconfig_kwargs=self.activation_qconfig_kwargs,
        weight_qconfig_kwargs=self.weight_qconfig_kwargs,
        reduce_range=self.reduce_range,
    )
    if self.tensorrt:
        _LOGGER.info(
            "Overriding quantization scheme to symmetric int8 "
            "for both weights and activations because tensorrt flag is True."
        )
        qproperties.tensorrt = True
        qproperties.activation_dtype = torch.qint8
        qproperties.weight_dtype = torch.qint8
    qconfig = get_qat_qconfig(qproperties)
    # prepare each module / submodule for quantization
    for name, quant_module in self._modules_to_quantize:
        # wrap any modules with wrap_qat set to True as QATWrapper(s)
        configure_module_qat_wrappers(quant_module, qproperties)
        # set quantization config (asymmetric activations, symmetric weights)
        quant_module.qconfig = qconfig
        # if for some reason the qconfig property is already set to None
        # in a submodule, the desired qconfig will not be propagated if
        # appropriate, calling helper function to delete these
        _clear_null_qconfigs(quant_module)
        # wrap all conv / linear blocks in with quantization observers
        torch_quantization.propagate_qconfig_(quant_module)
        configure_module_default_qconfigs(quant_module)
        add_quant_dequant(
            quant_module, name, module, self.custom_quantizable_module_types
        )
        # Remove output quantization from appropriate modules
        remove_activation_qat_by_layer_name(
            quant_module, remove_activation_qat_layers
        )
    # remove qconfigs for module types in exclude_module_types
    to_exclude = ["Softmax"]
    if self.exclude_module_types:
        to_exclude.extend(self.exclude_module_types)
    # if exclude_batchnorm flag is used, add batch norm layers to list of
    # modules to exclude qconfig
    if self.exclude_batchnorm:
        to_exclude.extend(["BatchNorm1d", "BatchNorm2d", "BatchNorm3d"])
    self._exclude_module_types = to_exclude
    if self.exclude_module_types:
        self._strip_excluded_module_qconfigs(module)
    # set modules with proper qconfigs to QAT mode
    self._prepare_qat(module, inplace=True)
    if self._quantize_embeddings:
        prepare_embeddings_qat(module, qproperties)
    # mark enabled before calibration so _calibrate_if_possible can proceed
    self._qat_enabled = True
    self._calibrate_if_possible(module)
    # mark export mode for module Conv layers
    module.export_with_qlinearconv = self._quantize_conv_activations
    if hasattr(module, "module"):
        # for DP/DDP unwrapping
        module.module.export_with_qlinearconv = self._quantize_conv_activations
def _prepare_qat(self, module, inplace=False):
# Set training mode to satisfy a constraint during torch's prepare_qat
prev_training_mode = module.training
module.training = True
torch_quantization.prepare_qat(module, inplace=inplace)
module.training = prev_training_mode
def _calibrate_if_possible(self, module):
if self.num_calibration_steps == 0 and self._calibration_dataloader:
warnings.warn(
f"num_calibration_steps is {self.num_calibration_steps}."
f"Calibration data loader will not be used."
)
elif self.num_calibration_steps and not self._calibration_dataloader:
raise ValueError(
f"num_calibration_steps is {self.num_calibration_steps}. "
"Calibration data loader is not set. Pass a "
"calibration_data_loader with initialize(...) method."
)
elif not self._calibration_dataloader or not self._qat_enabled:
return
elif self._calibration_dataloader:
self._calibrate(module)
def _calibrate(self, module):
    """Run calibration forward passes over the calibration dataloader.

    The module is put into eval mode for calibration and restored to its
    previous training mode afterwards. When ``num_calibration_steps`` is set,
    the dataloader is cycled so exactly that many batches are consumed.

    :param module: module to run calibration forward passes through
    """
    _LOGGER.info("Running quantization calibration using calibration_dataloader")
    was_training = module.training
    module.eval()

    # fall back to the default forward helper when no custom fn is given
    forward_fn: Callable = self._calibration_function or tensors_module_forward
    device = next(module.parameters()).device

    loader = (
        self._calibration_dataloader
        if self.num_calibration_steps is None
        else cycle(self._calibration_dataloader)
    )
    for step, batch in enumerate(loader):
        if self.num_calibration_steps and step >= self.num_calibration_steps:
            break
        batch = tensors_to_device(batch, device)
        with torch.no_grad():
            forward_fn(batch, module=module)

    if was_training:
        module.train()
def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:
return (
self._disable_quantization_observer_epoch is not None
and epoch >= self._disable_quantization_observer_epoch
and not self._quantization_observer_disabled
)
def _freeze_bn_stats_update_ready(self, epoch: float) -> bool:
return (
self._freeze_bn_stats_epoch is not None
and epoch >= self._freeze_bn_stats_epoch
and not self._bn_stats_frozen
)
def _strip_excluded_module_qconfigs(self, module: Module):
if not self.exclude_module_types:
return
excluded_classes = set(self.exclude_module_types)
for submodule in module.modules():
if submodule.__class__.__name__ in excluded_classes and hasattr(
submodule, "qconfig"
):
submodule.qconfig = None
def _validate_params(self):
self.validate_schedule()
if (
self._disable_quantization_observer_epoch is not None
and self._disable_quantization_observer_epoch < self._start_epoch
):
raise ValueError(
f"disable_quantization_observer_epoch may not be greater than "
f"start_epoch for QuantizationModifier, received: "
f"{self._disable_quantization_observer_epoch} with start_epoch "
f"{self._start_epoch}"
)
if (
self._freeze_bn_stats_epoch is not None
and self._freeze_bn_stats_epoch < self._start_epoch
):
raise ValueError(
"freeze_bn_stats_epoch may not be greater than start_epoch"
" for QuantizationModifier, received: {} with start_epoch {}".format(
self._freeze_bn_stats_epoch, self._start_epoch
)
)
def _select_quantization_modifier(state: Dict[str, Any]) -> Type:
    """Pick which modifier class should load a YAML state dict.

    Properties that exist only on the legacy modifier route loading to
    ``LegacyQuantizationModifier``; otherwise the current class is used.

    :param state: YAML-loaded kwargs for the modifier
    :return: modifier class to construct from ``state``
    """

    def _prop_names(cls) -> set:
        # names of every ModifierProp descriptor declared on the class
        return {
            str(attr)
            for attr in dir(cls)
            if isinstance(getattr(cls, attr), ModifierProp)
        }

    legacy_only = _prop_names(LegacyQuantizationModifier) - _prop_names(
        QuantizationModifier
    )
    if any(field in state for field in legacy_only):
        return LegacyQuantizationModifier
    return QuantizationModifier
21,212 | import logging
import math
import warnings
from itertools import cycle
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
import torch
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import (
PyTorchModifierYAML,
ScheduledModifier,
)
from sparseml.pytorch.sparsification.quantization.helpers import (
configure_module_bn_wrappers,
freeze_bn_stats,
fuse_module_conv_bn_relus,
)
from sparseml.pytorch.sparsification.quantization.legacy_modifier_quantization import (
QuantizationModifier as LegacyQuantizationModifier,
)
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationScheme,
QuantizationSchemeLoadable,
)
from sparseml.pytorch.sparsification.quantization.quantize import (
convert_module_qat_from_schemes,
raise_if_torch_quantization_not_available,
set_quantization_schemes,
)
from sparseml.pytorch.utils import BaseLogger, tensors_module_forward, tensors_to_device
from sparseml.sparsification import SparsificationTypes
class _QuantizationSchemesDict(dict):
def __str__(self):
QuantizationSchemeLoadable = Union[
"QuantizationScheme",
DictQuantizationScheme,
str,
None,
]
class QuantizationScheme(BaseModel):
def __init__(self, *args, **kwargs):
def load(
cls,
scheme: QuantizationSchemeLoadable,
default: Optional["QuantizationScheme"] = None,
) -> "QuantizationScheme":
def deepsparse(cls) -> "QuantizationScheme":
def tensorrt(cls) -> "QuantizationScheme":
def get_qconfig(self) -> "torch.quantization.QConfig":
def get_wrapper_qconfig(self) -> "torch.quantization.QConfig":
def __str__(self) -> str:
def _load_quantization_schemes_dict(
    schemes_dict: Optional[Dict[str, QuantizationSchemeLoadable]],
    default_scheme: QuantizationScheme,
) -> Dict[str, QuantizationScheme]:
    """Normalize a raw submodule->scheme mapping into QuantizationScheme objects.

    :param schemes_dict: mapping of submodule name to a loadable scheme, or None
    :param default_scheme: scheme used to fill unspecified fields
    :return: mapping of submodule name to loaded QuantizationScheme (empty when
        schemes_dict is None)
    """
    if schemes_dict is None:
        return {}
    loaded = {
        name: QuantizationScheme.load(raw, default=default_scheme)
        for name, raw in schemes_dict.items()
    }
    return _QuantizationSchemesDict(loaded)
21,213 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
class _BNWrapper(Module):
"""
Wraps BatchNormalization module to expose methods needed to enable
freezing/unfreezing of statistics
:param module: BatchNormalization module to be wrapped
"""
def __init__(self, module: Module):
super().__init__()
self.bn = module
self.freeze_bn = False
def running_mean(self):
return self.bn.running_mean
def running_mean(self, value):
self.bn.running_mean = value
def running_var(self):
return self.bn.running_var
def running_var(self, value):
self.bn.running_var = value
def weight(self):
return self.bn.weight
def weight(self, value):
self.bn.weight = value
def bias(self):
return self.bn.bias
def bias(self, value):
self.bn.bias = value
def gamma(self):
return self.bn.gamma
def gamma(self, value):
self.bn.gamma = value
def beta(self):
return self.bn.beta
def beta(self, value):
self.bn.beta = value
def num_batches_tracked(self):
return self.bn.num_batches_tracked
def num_batches_tracked(self, value):
self.bn.num_batches_tracked = value
def eps(self):
return self.bn.eps
def eps(self, value):
self.bn.eps = value
def momentum(self):
return self.bn.momentum
def momentum(self, value):
self.bn.momentum = value
def forward(self, x):
return self.bn(x)
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
def reset_running_stats(self):
self.bn.reset_running_stats()
def train(self, mode=True):
if not self.freeze_bn:
self.bn.train(mode)
return self
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
The provided code snippet includes necessary dependencies for implementing the `configure_module_bn_wrappers` function. Write a Python function `def configure_module_bn_wrappers(module: Module)` to solve the following problem:
Wrap any BatchNormalization modules that are not fused with convolutions with BNWrapper to enable freezing/unfreezing of BN statistics :param module: module to potentially wrap the submodules of
Here is the function:
def configure_module_bn_wrappers(module: Module):
    """
    Wrap any BatchNormalization modules that are not fused with convolutions
    with _BNWrapper to enable freezing/unfreezing of BN statistics.

    :param module: module to potentially wrap the submodules of
    """
    # modules that already expose freeze_bn_stats (e.g. fused conv-bn blocks)
    # are left untouched, including their entire subtree
    if hasattr(module, "freeze_bn_stats"):
        return
    bn_types = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
    for child_name, child in module.named_children():
        if type(child) in bn_types:
            setattr(module, child_name, _BNWrapper(child))
        # recurse on the original child module
        configure_module_bn_wrappers(child)
21,214 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
@dataclass
class QConfigProperties:
    """
    Dataclass that stores properties needed to define qconfig objects.
    Default values set here.

    NOTE(review): the ``@dataclass`` decorator and the ``@property``/setter
    decorators on ``symmetric_activations``/``symmetric_weights`` were lost in
    the original source (duplicate ``def`` names are invalid otherwise); they
    are restored here.

    :param symmetric_activations: if True, activations will have a symmetric
        quantization range with a pre-specified zero point
        (0 if activation_dtype=torch.qint8, 128 if activation_dtype=torch.quint8).
        Default is False.
    :param symmetric_weights: if True, weights will have a symmetric
        quantization range with a pre-specified zero point
        (0 if weight_dtype=torch.qint8, 128 if weight_dtype=torch.quint8).
        Default is True.
    :param reduce_range: if True, the quantization range will be reduced by one
        bit. This may prevent overflow issues with model execution on certain
        hardware. Default is False.
    :param activation_qconfig_kwargs: Additional kwargs for quantization of
        activations.
    :param weight_qconfig_kwargs: Additional kwargs for quantization of
        weights.
    :param activation_dtype: quantized activation data type.
        Default is torch.quint8.
    :param weight_dtype: quantized weights data type. Default is torch.qint8.
    :param activation_bits: number of bits for activations. Default is 8.
    :param weight_bits: number of bits for weights. Default is 8.
    :param activation_strategy: "tensor" to quantize over the whole activation
        tensor, or "channel" to quantize per channel. Default is "tensor"
    :param weight_strategy: "tensor" to quantize over the whole weight tensor,
        or "channel" to quantize per channel. Default is "tensor"
    :param tensorrt: if True sets quantization configuration for compatibility
        with explict quantization as supported by TensorRT 8.2.
    """

    _symmetric_activations: bool = False
    _symmetric_weights: bool = True
    reduce_range: bool = False
    activation_dtype: torch.dtype = torch.quint8
    weight_dtype: torch.dtype = torch.qint8
    activation_bits: int = 8
    weight_bits: int = 8
    activation_strategy: str = "tensor"
    weight_strategy: str = "tensor"
    activation_qconfig_kwargs: Dict[str, Any] = field(default_factory=dict)
    weight_qconfig_kwargs: Dict[str, Any] = field(default_factory=dict)
    tensorrt: bool = False

    @property
    def symmetric_activations(self) -> bool:
        # tensorrt compatibility always requires symmetric activations
        return self.tensorrt or self._symmetric_activations

    @symmetric_activations.setter
    def symmetric_activations(self, value: bool):
        self._symmetric_activations = value

    @property
    def symmetric_weights(self) -> bool:
        # tensorrt compatibility always requires symmetric weights
        return self.tensorrt or self._symmetric_weights

    @symmetric_weights.setter
    def symmetric_weights(self, value: bool):
        self._symmetric_weights = value
class QATWrapper(Module):
    """
    Wraps inputs and outputs of a Module or function with QuantStubs for
    Quantization-Aware-Training (QAT)

    :param forward_fn: function to be wrapped, should generally accept and return
        torch Tensor(s)
    :param num_inputs: number of inputs of the forward function to add a QuantStub
        to. Will wrap the first num_inputs ordered inputs of the function. Default
        is 1
    :param kwarg_input_names: list of names of key word arguments to the forward
        pass that should be wrapped with a fake quantize operation. Defaults to
        empty
    :param num_outputs: number of outputs of the forward function to add a
        QuantStub to. Will wrap the first num_outputs ordered outputs of the
        function. Default is 1. Will also add a DeQuantStub for FP32 conversion
        if torch.quantization.convert is invoked
    :param input_qconfigs: QConfig to use for calibrating the input QuantStubs.
        Can be a single QConfig that will be copied to each QuantStub or a list
        of one QConfig for each input. Instead of QConfig objects, the string
        'asymmetric' or 'symmetric' may be used for default UINT8 asymmetric and
        symmetric quantization respectively
    :param output_qconfigs: QConfig to use for calibrating the output QuantStubs.
        Same conventions as input_qconfigs
    :param qproperties: properties used to define QConfig. may also be a
        quantization scheme
    """

    @staticmethod
    def from_module(
        module: Module,
        qproperties: Union[QConfigProperties, QuantizationScheme],
    ) -> "QATWrapper":
        """
        :param module: torch Module to create a QATWrapper for
        :return: QATWrapper object created using the given Module as the forward
            function. Will attempt to find any other named parameter of the
            QATWrapper constructor from the attributes of the given Module
        """
        qat_wrapper_kwargs = (
            module.qat_wrapper_kwargs or {}
            if hasattr(module, "qat_wrapper_kwargs")
            else {}
        )
        # Remove qconfig from wrapped layer to avoid duplicate quantization
        module.qconfig = None
        return QATWrapper(
            forward_fn=module, qproperties=qproperties, **qat_wrapper_kwargs
        )

    def __init__(
        self,
        forward_fn: Callable[[Any], Any],
        qproperties: Union[QConfigProperties, QuantizationScheme],
        num_inputs: int = 1,
        kwarg_input_names: List[str] = None,
        num_outputs: int = 1,
        input_qconfigs: Union[
            "torch.quantization.QConfig", str, List["torch.quantization.QConfig"]
        ] = "asymmetric",
        output_qconfigs: Union[
            "torch.quantization.QConfig", str, List["torch.quantization.QConfig"]
        ] = "asymmetric",
    ):
        super().__init__()

        if torch_quantization is None:
            raise RuntimeError(
                "Unable to import package torch.quantization. "
                "Try upgrading your PyTorch version to >= 1.7.0."
            )
        if not callable(forward_fn):
            raise ValueError(
                "forward_fn of QATWrapper must be callable. "
                f"Received {type(forward_fn)}"
            )

        self.kwarg_input_names = kwarg_input_names or []
        num_input_quant_stubs = num_inputs + len(self.kwarg_input_names)

        self.forward_fn = forward_fn

        # Add weight qconfig to forward_fn (in case it has weights)
        qconfig_ = (
            get_qat_qconfig(qproperties)
            if isinstance(qproperties, QConfigProperties)
            else qproperties.get_qconfig()  # QuantizationScheme
        )
        qconfig = torch_quantization.QConfig(
            activation=torch.nn.Identity,
            weight=qconfig_.weight,
        )
        self.forward_fn.qconfig = qconfig

        # NOTE: _load_qconfigs is a @staticmethod; keyword-only call below
        # relies on `self` NOT being passed implicitly
        self.input_qconfigs = self._load_qconfigs(
            name="input_qconfigs",
            expected_len=num_input_quant_stubs,
            qconfigs=input_qconfigs,
            qproperties=qproperties,
        )
        self.output_qconfigs = self._load_qconfigs(
            name="output_qconfigs",
            expected_len=num_outputs,
            qconfigs=output_qconfigs,
            qproperties=qproperties,
        )

        self.input_quant_stubs = torch.nn.ModuleList(
            [torch_quantization.QuantStub() for _ in range(num_input_quant_stubs)]
        )
        self.output_quant_stubs = torch.nn.ModuleList(
            [torch_quantization.QuantStub() for _ in range(num_outputs)]
        )
        self.output_dequant_stubs = torch.nn.ModuleList(
            [torch_quantization.DeQuantStub() for _ in range(num_outputs)]
        )

    def forward(self, *args, **kwargs) -> Any:
        """
        :param args: arguments to forward function; the first num_inputs of these
            args will be wrapped by a QuantStub
        :param kwargs: key word arguments to pass to the wrapped forward function
        :return: outputs of the forward function with a QuantStub applied to the
            first num_outputs outputs
        """
        if any(kwarg not in kwargs for kwarg in self.kwarg_input_names):
            raise ValueError(
                f"QATWrapper expected kwargs {self.kwarg_input_names} to be included "
                f"in forward function kwargs. Found {list(kwargs.keys())}. missing "
                f"{[kwarg for kwarg in self.kwarg_input_names if kwarg not in kwargs]}"
            )

        qat_args = []
        # fake quantize positional arguments
        num_args_stubs = len(self.input_quant_stubs) - len(self.kwarg_input_names)
        for idx, arg in enumerate(args):
            if idx < num_args_stubs:
                arg = self.input_quant_stubs[idx](arg)
            qat_args.append(arg)

        # fake quantize key word arguments
        for idx, kwarg in enumerate(self.kwarg_input_names):
            kwargs[kwarg] = self.input_quant_stubs[num_args_stubs + idx](kwargs[kwarg])

        # wrapped forward pass
        outputs = self.forward_fn(*qat_args, **kwargs)

        if len(self.output_quant_stubs) == 0:
            # no output wrapping
            return outputs

        if isinstance(outputs, torch.Tensor):
            if len(self.output_quant_stubs) > 1:
                raise ValueError(
                    f"QATWrapper expected {len(self.output_quant_stubs)} outputs in "
                    "forward pass. Found one output"
                )
            # output is a single Tensor
            qat_output = self.output_quant_stubs[0](outputs)
            return self.output_dequant_stubs[0](qat_output)

        qat_outputs = []
        for idx, output in enumerate(outputs):
            if idx < len(self.output_quant_stubs):
                output = self.output_quant_stubs[idx](output)
                # BUG FIX: original referenced `self._output_deuant_stubs`,
                # which is never defined (the attribute created in __init__ is
                # `output_dequant_stubs`)
                output = self.output_dequant_stubs[idx](output)
            qat_outputs.append(output)
        return qat_outputs

    def configure_qconfig(self):
        """
        Sets the qconfigs of the quant stubs to the pre-initialized QConfigs
        """
        # BUG FIX: _load_qconfigs attaches the scheme to the qconfig as
        # `quantization_scheme`; the original checked/copied a
        # `quantization_stub` attribute that is never set anywhere
        for quant_stub, qconfig in zip(self.input_quant_stubs, self.input_qconfigs):
            quant_stub.qconfig = qconfig
            if hasattr(qconfig, "quantization_scheme"):
                quant_stub.quantization_scheme = qconfig.quantization_scheme
        for quant_stub, qconfig in zip(self.output_quant_stubs, self.output_qconfigs):
            quant_stub.qconfig = qconfig
            if hasattr(qconfig, "quantization_scheme"):
                quant_stub.quantization_scheme = qconfig.quantization_scheme

    @staticmethod
    def _load_qconfigs(
        name: str,
        expected_len: int,
        qconfigs: Union["QConfig", str, List["QConfig"]],  # noqa: F821
        qproperties: QConfigProperties,
    ):
        """
        Normalize user supplied qconfigs into a list of exactly expected_len
        QConfig objects, converting 'asymmetric'/'symmetric' strings into
        concrete QConfigs derived from qproperties.

        :param name: which argument is being loaded (for error messages)
        :param expected_len: number of qconfigs required
        :param qconfigs: single QConfig/string or list of them
        :param qproperties: QConfigProperties or QuantizationScheme source
        :raises ValueError: on invalid types, lengths, or string values
        """
        if not isinstance(qconfigs, (str, torch_quantization.QConfig, List)):
            raise ValueError(
                f"QATWrapper {name} must be a string, torch.quantization.QConfig, "
                f"or a List of them. Received a {type(qconfigs)}"
            )
        if isinstance(qconfigs, (str, torch_quantization.QConfig)):
            qconfigs = [deepcopy(qconfigs) for _ in range(expected_len)]
        if len(qconfigs) != expected_len:
            raise ValueError(
                f"QATWrapper {name} should have exactly one qconfig or one for every "
                f"argument ({expected_len}). Given {len(qconfigs)}"
            )

        valid_qconfig_strs = ["asymmetric", "symmetric"]
        for idx, qconfig in enumerate(qconfigs):
            if not isinstance(qconfig, str):
                continue
            if qconfig not in valid_qconfig_strs:
                raise ValueError(
                    "QATWrapper qconfig names can either be "
                    "torch.quantization.QConfig objects or a string "
                    f"in {valid_qconfig_strs} that will be converted to a QConfig. "
                    f"Found string with value {qconfig} in {name}"
                )
            qconfig_idx = None
            if isinstance(qproperties, QConfigProperties):
                qproperties_idx = deepcopy(qproperties)
                qproperties_idx.symmetric_activations = qconfig == "symmetric"
                qconfig_idx = get_qat_qconfig(qproperties_idx)
            else:
                # qproperties is a QuantizationScheme
                scheme_idx = deepcopy(qproperties)
                symmetric = qconfig == "symmetric"
                # always use output_activations of scheme because the activations
                # of the QuantStub() are the ones tracked
                # NOTE(review): the branch below sets input_activations.symmetric
                # despite the comment above mentioning output_activations —
                # behavior preserved from the original; confirm intent
                if scheme_idx.output_activations is not None:
                    scheme_idx.input_activations.symmetric = symmetric
                else:
                    scheme_idx.output_activations = QuantizationArgs(
                        symmetric=symmetric
                    )
                qconfig_idx = scheme_idx.get_qconfig()
                qconfig_idx.quantization_scheme = scheme_idx
            qconfigs[idx] = qconfig_idx
        return qconfigs
The provided code snippet includes necessary dependencies for implementing the `configure_module_qat_wrappers` function. Write a Python function `def configure_module_qat_wrappers( module: Module, qproperties: QConfigProperties, )` to solve the following problem:
if any submodule of the given module has the attribute wrap_qat == True, then it will be replaced by a QATWrapper of it created by QATWrapper.from_module. Other named kwargs to the QATWrapper constructor must be contained in a dictionary under an attribute named `qat_wrapper_kwargs` :param module: module to potentially wrap the submodules of :param qproperties: properties used to define QConfig.
Here is the function:
def configure_module_qat_wrappers(
    module: Module,
    qproperties: QConfigProperties,
):
    """
    Replace any submodule whose ``wrap_qat`` attribute is True with a
    QATWrapper built by ``QATWrapper.from_module``. Other named kwargs to the
    QATWrapper constructor must be contained in a dictionary under an
    attribute named ``qat_wrapper_kwargs`` on the submodule.

    :param module: module to potentially wrap the submodules of
    :param qproperties: properties used to define QConfig.
    """
    for child_name, child in module.named_children():
        if getattr(child, "wrap_qat", False):
            setattr(
                module,
                child_name,
                QATWrapper.from_module(module=child, qproperties=qproperties),
            )
        # recurse on the original child module
        configure_module_qat_wrappers(module=child, qproperties=qproperties)
21,215 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
# Module types that add_quant_dequant treats as quantizable (wrapped in a
# QuantWrapper when they carry a qconfig).
# NOTE(review): the `if nni` guard implies `nni` may be falsy when
# torch.nn.intrinsic is unavailable — presumably set to None by the file's
# import handling; a plain `import ... as nni` would always be truthy. Confirm.
_QUANTIZABLE_MODULE_TYPES = (
    {
        # Conv based layers
        torch.nn.Conv1d,
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        nni.ConvBn1d,
        nni.ConvBn2d,
        nni.ConvBn3d,
        nni.ConvReLU1d,
        nni.ConvReLU2d,
        nni.ConvReLU3d,
        nni.ConvBnReLU1d,
        nni.ConvBnReLU2d,
        nni.ConvBnReLU3d,
        # Linear Layers
        torch.nn.Linear,
        nni.LinearReLU,
    }
    if nni  # nni will always import if torch.quantization is available
    else None
)
The provided code snippet includes necessary dependencies for implementing the `add_quant_dequant` function. Write a Python function `def add_quant_dequant( module: torch.nn.Module, name=None, parent_module=None, layer_class_names=None )` to solve the following problem:
Wraps all Conv and Linear submodule with a qconfig with a QuantWrapper :param module: the module to modify :param name: name of the module to modify; default to None :param parent_module: parent module containing the module to modify; default to None :param layer_class_names: list of module class names to be added to the list of quantizable modules :return: the modified module
Here is the function:
def add_quant_dequant(
    module: torch.nn.Module, name=None, parent_module=None, layer_class_names=None
):
    """
    Wraps all Conv and Linear submodules that carry a qconfig with a
    QuantWrapper.

    :param module: the module to modify
    :param name: name of the module to modify; default to None
    :param parent_module: parent module containing the module to modify;
        default to None
    :param layer_class_names: list of module class names to be added to the
        list of quantizable modules
    :return: the modified module
    """
    children = list(module.named_children())

    quantizable = type(module) in _QUANTIZABLE_MODULE_TYPES
    if layer_class_names:
        quantizable = quantizable or module.__class__.__name__ in layer_class_names

    if quantizable and hasattr(module, "qconfig") and module.qconfig:
        module = torch_quantization.QuantWrapper(module)
        if parent_module is not None and not children:
            if "." in name:
                # unwrap name under parent module, nested through submodules
                *parent_parts, name = name.split(".")
                for part in parent_parts:
                    parent_module = getattr(parent_module, part)
            # replace the parent's child with the newly wrapped module
            setattr(parent_module, name, module)
    else:
        for child_name, child in children:
            setattr(
                module,
                child_name,
                add_quant_dequant(child, layer_class_names=layer_class_names),
            )
    return module
21,216 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
The provided code snippet includes necessary dependencies for implementing the `remove_activation_qat_by_layer_name` function. Write a Python function `def remove_activation_qat_by_layer_name(module: Module, layer_class_names: List[str])` to solve the following problem:
Disables fake quantization of activations for all submodules of the given module with class name layer_class_names :param module: module to remove activation fake quantization for certain layers :param layer_class_names: list of layer class names that should be affected. e.g. ["Linear"]
Here is the function:
def remove_activation_qat_by_layer_name(module: Module, layer_class_names: List[str]):
    """
    Disables fake quantization of activations for all submodules of the given
    module whose class name appears in layer_class_names.

    :param module: module to remove activation fake quantization for
    :param layer_class_names: list of layer class names that should be
        affected. e.g. ["Linear"]
    """
    targets = set(layer_class_names)
    for sub in module.modules():
        if sub.__class__.__name__ not in targets:
            continue
        if not hasattr(sub, "qconfig"):
            continue
        # keep the weight fake-quant, drop only the activation fake-quant
        sub.qconfig = torch_quantization.QConfig(
            activation=torch.nn.Identity,
            weight=sub.qconfig.weight,
        )
21,217 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
def freeze_bn_stats(module: Module):
    """Freeze BN running statistics on modules exposing ``freeze_bn_stats``."""
    if not hasattr(module, "freeze_bn_stats"):
        return
    module.freeze_bn_stats()
21,218 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.utils import get_layer
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
def _delete_get_block_hooks(
    module: Module,
    fuse_blocks: List[List[str]],
) -> List[Tuple[Any, Any]]:
    """Detach forward pre/post hooks from every layer in each fuse block.

    The hooks are returned per block so they can be re-attached to the fused
    modules after fusion.

    :param module: root module containing the layers named in fuse_blocks
    :param fuse_blocks: list of blocks, each a list of layer names
    :return: one (pre_hooks, post_hooks) tuple per block
    """
    collected = []
    for block in fuse_blocks:
        pre, post = [], []
        for layer_name in block:
            # resolve the Module object from its dotted name
            layer = get_layer(layer_name, module)
            pre.extend(layer._forward_pre_hooks.values())
            post.extend(layer._forward_hooks.values())
            # de-register so fusion does not duplicate or lose the hooks
            layer._forward_pre_hooks.clear()
            layer._forward_hooks.clear()
        collected.append((pre, post))
    return collected
def _add_fused_block_hooks(module: Module, block_hooks: List[Tuple[Any, Any]]):
    """Re-register saved pre/post forward hooks onto the fused modules.

    :param module: root module after fusion
    :param block_hooks: per-block (pre_hooks, post_hooks) tuples, in the same
        order the fused modules appear in module.modules()
    :raises RuntimeError: if the fused module count does not match block_hooks
    """
    fused = [m for m in module.modules() if isinstance(m, _FUSED_MODULE_TYPES)]
    if len(fused) != len(block_hooks):
        raise RuntimeError(
            f"Number of fused modules ({len(fused)}) after layer fusion in "
            f"module {module.__class__.__name__}. does not match expected "
            f"({len(block_hooks)}). Module may have already been fused or block "
            "skipped during torch.quantization.fuse_modules"
        )
    for fused_mod, (pre_hooks, post_hooks) in zip(fused, block_hooks):
        for hook in pre_hooks:
            fused_mod.register_forward_pre_hook(hook)
        for hook in post_hooks:
            fused_mod.register_forward_hook(hook)
def _set_submodule(root_module: Module, sub_module_path, sub_module: Module):
sub_module.training = root_module.training
current_module = root_module
sub_module_path = sub_module_path.split(".")
for child_module in sub_module_path[:-1]:
current_module = getattr(current_module, child_module)
setattr(current_module, sub_module_path[-1], sub_module)
def _wrap_bn_sub_class(bn_subclass, override_forward=True):
batch_norm = BatchNorm2d(bn_subclass.num_features)
batch_norm.__dict__ = bn_subclass.__dict__
if override_forward:
batch_norm.forward = bn_subclass.forward
del bn_subclass
return batch_norm
The provided code snippet includes necessary dependencies for implementing the `fuse_module_conv_bn_relus` function. Write a Python function `def fuse_module_conv_bn_relus( module: Module, inplace: bool = True, override_bn_subclasses_forward: Union[bool, str] = True, ) -> Module` to solve the following problem:
Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the given module. To be fused, these layers must appear sequentially in module.named_modules() and be in the same submodule. Fuses either Conv2d -> BatchNorm2d, Conv2d -> ReLU, or Conv2d -> BatchNorm2d -> ReLU blocks If this function does not fuse the model in the desired way, implement an in place fusing function for the model. :param module: the module to fuse :param inplace: set True to perform fusions in-place. default is True :param override_bn_subclasses_forward: if True, modules that are subclasses of BatchNorm2d will be modified to be BatchNorm2d but with the forward pass and state variables copied from the subclass. This is so these BN modules can pass PyTorch type checking when fusing. Can set to "override-only" and only parameters will be overwritten, not the forward pass. Default is True :return: the fused module
Here is the function:
def fuse_module_conv_bn_relus(
    module: Module,
    inplace: bool = True,
    override_bn_subclasses_forward: Union[bool, str] = True,
) -> Module:
    """
    Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the
    given module. To be fused, these layers must appear sequentially in
    module.named_modules() and be in the same submodule.
    Fuses either Conv2d -> BatchNorm2d, Conv2d -> ReLU, or
    Conv2d -> BatchNorm2d -> ReLU blocks

    If this function does not fuse the model in the desired way, implement an
    in place fusing function for the model.

    :param module: the module to fuse
    :param inplace: set True to perform fusions in-place. default is True
    :param override_bn_subclasses_forward: if True, modules that are subclasses of
        BatchNorm2d will be modified to be BatchNorm2d but with the forward
        pass and state variables copied from the subclass. This is so these
        BN modules can pass PyTorch type checking when fusing. Can set to
        "override-only" and only parameters will be overwritten, not the
        forward pass. Default is True
    :return: the fused module
    """
    if torch_quantization is None:
        raise RuntimeError(
            "Unable to import package torch.quantization. "
            "Try upgrading your PyTorch version."
        )
    if not inplace:
        module = deepcopy(module)
    conv_blocks = []  # completed lists of module names to fuse together
    current_block = []  # names collected for the block currently being matched
    current_block_submodule_name = ""
    for name, layer in module.named_modules():
        submodule_name = ".".join(name.split(".")[:-1])
        # extend the current block if this layer continues a fusable pattern
        # within the same parent submodule
        if (
            len(current_block) == 1  # [Conv2d]
            and isinstance(layer, BatchNorm2d)
            and submodule_name == current_block_submodule_name
        ) or (
            len(current_block) in [1, 2]  # [Conv2d] or [Conv2d, BatchNorm2d]
            and isinstance(layer, ReLU)
            # NOTE(review): current_block stores names (str), so this isinstance
            # check is always True — confirm whether modules were intended here
            and not isinstance(current_block[-1], ReLU)
            and submodule_name == current_block_submodule_name
        ):
            if isinstance(layer, ReLU_nm):
                # replace custom ReLU wrapper with a plain ReLU for torch fusion
                _set_submodule(module, name, ReLU(inplace=layer.inplace))
            if isinstance(layer, BatchNorm2d) and not type(layer) is BatchNorm2d:
                if not override_bn_subclasses_forward:
                    raise RuntimeError(
                        "Detected a Conv-BN block that uses a subclass of BatchNorm2d. "
                        "This will cause a type error when fusing with PyTorch, "
                        "set override_bn_subclasses_forward to True or 'override-only "
                        "to modify this BN subclass to be a BatchNorm2d object"
                    )
                # swap BN subclass with overwritten BN class that will pass torch
                # type checking
                overwritten_bn = _wrap_bn_sub_class(
                    layer,
                    override_forward=override_bn_subclasses_forward != "override-only",
                )
                _set_submodule(module, name, overwritten_bn),
            current_block.append(name)
        else:
            # pattern broken: flush the current block, then possibly start a
            # new one if this layer is a Conv2d
            if current_block:
                if len(current_block) > 1:  # cannot fuse single module
                    conv_blocks.append(current_block)
                current_block = []
                current_block_submodule_name = ""
            if isinstance(layer, Conv2d):
                current_block.append(name)
                current_block_submodule_name = submodule_name
    # flush the trailing block left open when iteration ended
    if len(current_block) > 1:
        conv_blocks.append(current_block)
    if conv_blocks:
        # manually save and move hooks surrounding fused blocks
        # into new fused modules due to torch.quantization
        # error when a module has more than one hook
        block_hooks = _delete_get_block_hooks(module, conv_blocks)
        # run torch fusion
        if _PARSED_TORCH_VERSION < version.parse("1.10.0"):
            torch_quantization.fuse_modules(module, conv_blocks, inplace=True)
        else:
            if module.training:
                torch.ao.quantization.fuse_modules_qat(
                    module, conv_blocks, inplace=True
                )
            else:
                torch.ao.quantization.fuse_modules(module, conv_blocks, inplace=True)
        # add hooks back
        _add_fused_block_hooks(module, block_hooks)
    return module
21,219 | import logging
from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict, List, NamedTuple, Optional, Union
import numpy
import onnx
import torch
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import (
ONNXGraph,
get_batch_norm_params,
get_init_by_name,
get_node_attributes,
get_node_output_nodes,
quantize_resnet_identity_add_inputs,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparsezoo.utils import save_onnx
# Op types whose inputs are expected to stay quantized: during cleanup, a
# QuantizeLinear -> DequantizeLinear pair whose output feeds one of these ops
# is not removed (see _cleanup_unused_quants).
KEEP_QUANT_INPUT_OPS = [
    "Add",
    "ConvInteger",
    "MatMulInteger",
    "QLinearConv",
    "QLinearMatMul",
    "QLinearAdd",
]
def delete_quant_node(
    model: ModelProto,
    node: NodeProto,
    keep_weight: bool = False,
):
    """
    Deletes a QuantizeLinear or DequantizeLinear and its parameters from the model

    :param model: ONNX model to modify
    :param node: the QuantizeLinear or DequantizeLinear node to delete
    :param keep_weight: set true to not delete the weight param possibly stored as an
        initializer to the first input of this node
    """
    assert node.op_type in _QUANTIZE_OP_NAMES, (
        "Op Type must be either QuantizeLinear or DequantizeLinear, found {} ".format(
            node.op_type
        )
    )
    if keep_weight:
        # drop the reference to the weight initializer so it survives deletion
        del node.input[0]
    remove_node_and_params_from_graph(model, node)
def _replace_input_id_model(model: ModelProto, old_id: str, new_id: str):
    """Rewrite every node input in the graph that references ``old_id`` to ``new_id``."""
    for graph_node in model.graph.node:
        for position in range(len(graph_node.input)):
            if graph_node.input[position] == old_id:
                graph_node.input[position] = new_id
The provided code snippet includes necessary dependencies for implementing the `_cleanup_unused_quants` function. Write a Python function `def _cleanup_unused_quants(model: ModelProto)` to solve the following problem:
A pass for removing unused Quantize->Dequantize blocks. This should be called at the end of conversion, once all of the conversions to quantized operators have been tried. Example: op -> QuantizeLinear -> DequantizeLinear -> non-quantized op => op -> non-quantized operator
Here is the function:
def _cleanup_unused_quants(model: ModelProto):
    """
    A pass for removing unused Quantize->Dequantize blocks.
    This should be called at the end of conversion, once all of the conversions
    to quantized operators have been tried.

    Example:
        op -> QuantizeLinear -> DequantizeLinear -> non-quantized op
        => op -> non-quantized operator

    :param model: ONNX model to modify in place
    """
    graph = ONNXGraph(model)
    nodes_to_delete = []
    quant_nodes = [n for n in model.graph.node if n.op_type == "QuantizeLinear"]
    output_names = [out.name for out in model.graph.output]
    for quant_node in quant_nodes:
        # only consider Quantize nodes followed directly by a single Dequantize
        dequant_node = graph.get_node_single_child(quant_node)
        if not dequant_node or dequant_node.op_type != "DequantizeLinear":
            continue
        # the block must stay if the Dequantize output is a graph output
        removable = not any(
            output_id in output_names for output_id in dequant_node.output
        )
        dequant_children = graph.get_node_children(dequant_node)
        for child in dequant_children:
            # check if any dequant children depend on having QDQ inputs
            if isinstance(child, onnx.NodeProto) and (
                child.op_type in KEEP_QUANT_INPUT_OPS
            ):
                removable = False
        if not removable:
            continue
        # Forward QuantizeLinear input to DequantizeLinear output
        _replace_input_id_model(model, dequant_node.output[0], quant_node.input[0])
        # Remove QuantizeLinear->DequantizeLinear block
        nodes_to_delete.append(quant_node)
        nodes_to_delete.append(dequant_node)
    for n in nodes_to_delete:
        delete_quant_node(model, n)
    # update graph
    graph.update()
    graph.delete_unused_initializers()
21,220 | import logging
from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict, List, NamedTuple, Optional, Union
import numpy
import onnx
import torch
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import (
ONNXGraph,
get_batch_norm_params,
get_init_by_name,
get_node_attributes,
get_node_output_nodes,
quantize_resnet_identity_add_inputs,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparsezoo.utils import save_onnx
def _fold_qat_conv_bns(model: ModelProto):
    """
    Fold (Conv -> Div -> BatchNormalization) blocks left over from QAT export
    into a single Conv node: (Conv -> Div -> BN) -> Conv.

    The conv weight should already be folded in its QuantizeLinear, so the Div
    (which undoes the weight folding) is removed, and the BN statistics are
    folded into the conv bias before the BN node is deleted.

    :param model: ONNX model to modify in place
    """
    for conv_node in model.graph.node:
        if conv_node.op_type != "Conv":
            # not a conv node
            continue
        graph = ONNXGraph(model)
        div_node = graph.get_node_single_child(conv_node)
        if not div_node or div_node.op_type != "Div":
            continue
        bn_node = graph.get_node_single_child(div_node)
        if not bn_node or bn_node.op_type != "BatchNormalization":
            continue
        # forward conv output to div children
        swap_node_output(conv_node, div_node.output[0])
        # remove div from graph
        remove_node_and_params_from_graph(model, div_node)
        # fold bn into conv bias and remove bn
        _fold_conv_bn_bias(model, conv_node, bn_node)
def _fold_relu_quants(model: ModelProto):
    """
    Delete Relu nodes whose outputs feed only QuantizeLinear nodes that have a
    zero point of 0 (the quantization of negatives to the zero point makes the
    Relu redundant in that case).

    :param model: ONNX model to modify in place
    """
    for relu_node in model.graph.node:
        if relu_node.op_type != "Relu":
            continue
        relu_children = get_node_output_nodes(model, relu_node)
        if not relu_children or any(
            node.op_type != "QuantizeLinear" for node in relu_children
        ):  # skip if any child is not a quantize node
            continue
        quantize_params = [
            get_quantization_params(model, quant_node) for quant_node in relu_children
        ]
        if any(params.zero_point != 0 for params in quantize_params):
            # skip if activation zero point does not match relu threshold of 0
            continue
        # set all child input nodes to the relu node input
        for quant_node in relu_children:
            quant_node.input[0] = relu_node.input[0]
        # delete relu node
        remove_node_and_params_from_graph(model, relu_node)
def _convert_single_constants_to_initializers(model: ModelProto):
    """
    Convert single-tensor Constant nodes in the graph into graph initializers.

    int8 constant tensors are shifted into uint8 (value + 128) so all quantized
    constants in the graph share a consistent unsigned dtype.

    :param model: ONNX model to modify in place
    """
    non_single_constant_nodes = []  # nodes kept in the graph
    for node in model.graph.node:
        if node.op_type != "Constant" or len(node.attribute) != 1:
            non_single_constant_nodes.append(node)
            continue  # skip non-constants, and constants with multiple tensors
        # create initializer from the constant's single tensor attribute
        const_array = numpy_helper.to_array(node.attribute[0].t)
        # convert int8 -> uint8; widen to int32 before the +128 shift so it
        # cannot overflow the int8 dtype (required under NumPy 2.x / NEP 50
        # promotion rules, and consistent with _convert_signed_to_unsigned)
        if const_array.dtype == numpy.int8:
            const_array = (const_array.astype(numpy.int32) + 128).astype(numpy.uint8)
        # add named tensor to initializer list
        initializer = numpy_helper.from_array(const_array, name=node.output[0])
        model.graph.initializer.append(initializer)
    # bulk remove all converted constants by overwriting node list
    model.graph.ClearField("node")
    model.graph.node.extend(non_single_constant_nodes)
def _convert_signed_to_unsigned(model: ModelProto):
    """
    Replace every int8 initializer with its uint8 equivalent (value + 128) so
    quantized op inputs and weights share a single unsigned representation.
    """

    def _to_uint8(init):
        # widen to int32 before shifting so +128 cannot overflow int8
        shifted = numpy_helper.to_array(init).astype(numpy.int32) + 128
        return numpy_helper.from_array(shifted.astype(numpy.uint8), name=init.name)

    # data_type 3 is TensorProto int8
    int8_inits = [init for init in model.graph.initializer if init.data_type == 3]
    replacements = [_to_uint8(init) for init in int8_inits]
    for init in int8_inits:
        model.graph.initializer.remove(init)
    model.graph.initializer.extend(replacements)
def _delete_repeated_qat_blocks(model: ModelProto):
    """
    Remove repeated QAT quant/dequant blocks:
    (Quant -> Dequant -> Quant -> Dequant) -> (Quant -> Dequant)

    NOTE(review): the quantization parameters of the two blocks are not
    compared before merging — confirm they are guaranteed equal by the export.

    :param model: ONNX model to modify in place
    """
    graph = ONNXGraph(model)
    nodes_to_delete = []
    quant_nodes = [n for n in model.graph.node if n.op_type == "QuantizeLinear"]
    for quant_node_1 in quant_nodes:
        # match a Quant -> Dequant -> Quant -> Dequant chain
        dequant_node_1 = graph.get_node_single_child(quant_node_1)
        if not dequant_node_1 or dequant_node_1.op_type != "DequantizeLinear":
            continue
        quant_node_2 = graph.get_node_single_child(dequant_node_1)
        if not quant_node_2 or quant_node_2.op_type != "QuantizeLinear":
            continue
        dequant_node_2 = graph.get_node_single_child(quant_node_2)
        if not dequant_node_2 or dequant_node_2.op_type != "DequantizeLinear":
            continue
        # forward first qat block input to that of the second
        quant_node_2.input[0] = quant_node_1.input[0]
        # remove repeated quant/dequant block
        nodes_to_delete.append(quant_node_1)
        nodes_to_delete.append(dequant_node_1)
    for n in nodes_to_delete:
        delete_quant_node(model, n)
    # cleanup graph
    graph.update()
    graph.delete_unused_initializers()
def _convert_quantizable_matmul(model: ModelProto):
    """
    Convert quantizable MatMul ops: first those whose outputs are quantized,
    then the remaining ones with non-quantized outputs.

    :param model: ONNX model to modify in place
    """
    _convert_quantizable_matmul_with_quantized_outputs(model)
    _convert_quantizable_matmuls_with_nonquantized_outputs(model)
def _convert_quantizable_gemm_no_activations(model: ModelProto):
    """
    A pass for converting a Gemm op with kernel whose activations
    are not necessarily quantized into a MatMulInteger followed by
    a bias add and cast to FP32

    | Starting with:
    |          INPUT         QuantizeLinear (with constant kernel)
    |            |               |
    |     DequantizeLinear   DequantizeLinear
    |                  |      |
    |                   Gemm (with bias)
    |                     |
    |                  OUTPUT
    | We end up converting to:
    |       INPUT
    |         |
    |     MatMulInteger (with constant uint8 kernel)
    |         |
    |     Add (constant bias + zero point correction)
    |         |
    |     Cast (INT32 -> FP32)
    |         |
    |     Mul (Rescale from bias scale)
    |         |
    |       OUTPUT

    :param model: ONNX model to modify in place
    """
    conversion_count = 0
    gemm_nodes = [n for n in model.graph.node if n.op_type in ["Gemm"]]
    for gemm_node in gemm_nodes:
        if len(gemm_node.input) != 3:
            # this function currently only converts Gemm nodes with bias add
            continue
        gemm_attributes = get_node_attributes(gemm_node)
        if (
            gemm_attributes.get("alpha", 1.0) != 1.0
            or (gemm_attributes.get("beta", 1.0) != 1.0)
            or gemm_attributes.get("transA", False)
        ):
            # we do not currently handle Gemms with transposed A, or scalar multiples
            continue
        transpose_weight = bool(gemm_attributes.get("transB"))
        graph = ONNXGraph(model)
        #############
        # Matching
        #############
        # weight path: Gemm input 1 must be DequantizeLinear <- QuantizeLinear
        weight_dequantize_node = graph.get_node_single_parent(gemm_node, 1)
        if (
            not weight_dequantize_node
            or weight_dequantize_node.op_type != "DequantizeLinear"
        ):
            continue
        weight_quantize_node = graph.get_node_single_parent(weight_dequantize_node, 0)
        if not weight_quantize_node or weight_quantize_node.op_type != "QuantizeLinear":
            continue
        input_quantize_node = graph.get_node_single_parent(gemm_node, 0)
        if (
            not input_quantize_node
            or input_quantize_node.op_type not in _QUANTIZE_OP_NAMES
        ):
            continue
        input_quantize_params = get_quantization_params(
            model, input_quantize_node, include_target=False
        )
        # include_target=True also pulls the constant weight tensor itself
        weight_quantize_params = get_quantization_params(
            model, weight_quantize_node, include_target=True
        )
        if weight_quantize_params.target is None:
            # weight initializer not included
            continue
        # the node feeding the Gemm activation must be the dequantize op
        if input_quantize_node.op_type != "DequantizeLinear":
            continue
        bias_initializer = graph.get_init_by_name(gemm_node.input[2])
        if bias_initializer is None:
            continue
        _LOGGER.debug(f"Matched quantizable Gemm weight and bias: {gemm_node.name}")
        # Conversion
        _add_quantized_conv_matmul_add_ops(
            model=model,
            node=gemm_node,
            input_quantize_node=input_quantize_node,
            weight_quantize_node=weight_quantize_node,
            input_quantize_params=input_quantize_params,
            weight_quantize_params=weight_quantize_params,
            target_output=gemm_node.output[0],
            transpose_weight=transpose_weight,
            bias_add_name="{}_bias_add".format(gemm_node.name),
            bias_initializer=bias_initializer,
        )
        # Cleanup
        # delete folded quantization ops
        delete_quant_node(model, weight_dequantize_node)
        delete_quant_node(model, weight_quantize_node)
        # only delete input node if the matmul is the only child
        current_graph = ONNXGraph(model)
        if len(current_graph.get_node_children(input_quantize_node)) == 1:
            delete_quant_node(model, input_quantize_node)
        # delete original Gemm node
        remove_node_and_params_from_graph(model, gemm_node)
        conversion_count += 1
    if gemm_nodes:
        _LOGGER.info(
            f"Converted {conversion_count} quantizable Gemm ops with weight and bias "
            "to MatMulInteger and Add"
        )
    graph = ONNXGraph(model)
    graph.delete_unused_initializers()
def _convert_quantizable_matmul_and_add(model: ModelProto):
    """
    A pass for converting a MatMul with kernel and bias into a quantized representation

    | Starting with:
    |          INPUT         QuantizeLinear (with constant kernel)
    |            |               |
    |     QuantizeLinear     DequantizeLinear
    |            |               |
    |     DequantizeLinear   Transpose
    |                  |      |
    |                   MatMul
    |                     |
    |                    Add (with constant bias)
    |                     |
    |               QuantizeLinear (Optional)
    |                     |
    |              DequantizeLinear (Optional)
    |                     |
    |                  OUTPUT
    | We end up converting to:
    |       INPUT
    |         |
    |     QuantizeLinear
    |         |
    |     MatMulInteger (with constant uint8 kernel)
    |         |
    |     Add (constant bias + zero point correction)
    |         |
    |     Cast (INT32 -> FP32)
    |         |
    |     Mul (Rescale from bias scale)
    |         |
    |       OUTPUT

    :param model: ONNX model to modify in place
    """
    conversion_count = 0
    matmul_nodes = [n for n in model.graph.node if n.op_type in ["MatMul"]]
    for matmul_node in matmul_nodes:
        graph = ONNXGraph(model)
        #############
        # Matching
        #############
        # weight path: MatMul input 1 must be Transpose <- Dequantize <- Quantize
        weight_transpose_node = graph.get_node_single_parent(matmul_node, 1)
        if not weight_transpose_node or weight_transpose_node.op_type != "Transpose":
            continue
        weight_dequantize_node = graph.get_node_single_parent(weight_transpose_node, 0)
        if (
            not weight_dequantize_node
            or weight_dequantize_node.op_type != "DequantizeLinear"
        ):
            continue
        weight_quantize_node = graph.get_node_single_parent(weight_dequantize_node, 0)
        if not weight_quantize_node or weight_quantize_node.op_type != "QuantizeLinear":
            continue
        input_quantize_node = graph.get_node_single_parent(matmul_node, 0)
        if (
            not input_quantize_node
            or input_quantize_node.op_type not in _QUANTIZE_OP_NAMES
        ):
            continue
        bias_add_node = graph.get_node_single_child(matmul_node)
        if not bias_add_node or bias_add_node.op_type != "Add":
            continue
        # NOTE(review): these two are never reassigned, so the optional output
        # QDQ pair described in the docstring is never matched and the related
        # branches below are dead — confirm intended
        output_quantize_node = None
        output_dequantize_node = None
        input_quantize_params = get_quantization_params(
            model, input_quantize_node, include_target=False
        )
        # include_target=True also pulls the constant weight tensor itself
        weight_quantize_params = get_quantization_params(
            model, weight_quantize_node, include_target=True
        )
        if weight_quantize_params.target is None:
            # weight initializer not included
            continue
        if input_quantize_node.op_type != "DequantizeLinear":
            continue
        if output_quantize_node and output_quantize_node.op_type != "QuantizeLinear":
            continue
        # the constant bias may be either input of the Add
        bias_initializer = get_init_by_name(model, bias_add_node.input[1]) or (
            get_init_by_name(model, bias_add_node.input[0])
        )
        if bias_initializer is None:
            continue
        _LOGGER.debug(f"Matched quantizable MatMul weight and bias: {matmul_node.name}")
        # Conversion
        _add_quantized_conv_matmul_add_ops(
            model=model,
            node=matmul_node,
            input_quantize_node=input_quantize_node,
            weight_quantize_node=weight_quantize_node,
            input_quantize_params=input_quantize_params,
            weight_quantize_params=weight_quantize_params,
            target_output=(
                output_dequantize_node.output[0]
                if output_dequantize_node
                else bias_add_node.output[0]
            ),
            transpose_weight=True,
            bias_add_name=bias_add_node.name,
            bias_initializer=bias_initializer,
            output_quantize_node=output_quantize_node,
            output_dequantize_node=output_dequantize_node,
        )
        # Cleanup
        # delete folded quantization ops
        delete_quant_node(model, weight_dequantize_node)
        delete_quant_node(model, weight_quantize_node)
        remove_node_and_params_from_graph(model, weight_transpose_node)
        # only delete input node if the matmul is the only child
        current_graph = ONNXGraph(model)
        if len(current_graph.get_node_children(input_quantize_node)) == 1:
            delete_quant_node(model, input_quantize_node)
        if output_quantize_node:
            delete_quant_node(model, output_quantize_node)
        if output_dequantize_node:
            delete_quant_node(model, output_dequantize_node)
        # delete original MatMul node
        remove_node_and_params_from_graph(model, matmul_node)
        # delete original Add node
        remove_node_and_params_from_graph(model, bias_add_node)
        conversion_count += 1
    if matmul_nodes:
        _LOGGER.info(
            f"Converted {conversion_count} quantizable MatMul ops with weight and bias "
            "to MatMulInteger and Add"
        )
    graph = ONNXGraph(model)
    graph.delete_unused_initializers()
def _convert_quantizable_conv_integer(model: ModelProto):
    """
    A pass for converting a Conv op with kernel whose activations
    are not necessarily quantized into a ConvInteger followed by
    a bias add and cast to FP32

    | Starting with:
    |          INPUT         QuantizeLinear (with constant kernel)
    |            |               |
    |     QuantizeLinear     DequantizeLinear
    |            |               |
    |     DequantizeLinear       |
    |            |               |
    |             Conv (with optional bias)
    |                     |
    |                  OUTPUT
    | We end up converting to:
    |       INPUT
    |         |
    |     QuantizeLinear
    |         |
    |     ConvInteger (with constant uint8 kernel)
    |         |
    |     Add (optional, constant bias + zero point correction)
    |         |
    |     Cast (INT32 -> FP32)
    |         |
    |     Mul (Rescale from bias scale)
    |         |
    |       OUTPUT

    :param model: ONNX model to modify in place
    """
    conversion_count = 0
    conv_nodes = [n for n in model.graph.node if n.op_type in ["Conv"]]
    orig_conv_weight_name_to_node_ids = defaultdict(list)
    for conv_node in conv_nodes:
        # bias is optional: Conv nodes with only two inputs are converted
        # without the bias add
        graph = ONNXGraph(model)
        #############
        # Matching
        #############
        # weight path: Conv input 1 must be DequantizeLinear <- QuantizeLinear
        weight_dequantize_node = graph.get_node_single_parent(conv_node, 1)
        if (
            not weight_dequantize_node
            or weight_dequantize_node.op_type != "DequantizeLinear"
        ):
            continue
        weight_quantize_node = graph.get_node_single_parent(weight_dequantize_node, 0)
        if not weight_quantize_node or weight_quantize_node.op_type != "QuantizeLinear":
            continue
        input_quantize_node = graph.get_node_single_parent(conv_node, 0)
        if (
            not input_quantize_node
            or input_quantize_node.op_type not in _QUANTIZE_OP_NAMES
        ):
            continue
        input_quantize_params = get_quantization_params(
            model, input_quantize_node, include_target=False
        )
        # include_target=True also pulls the constant weight tensor itself
        weight_quantize_params = get_quantization_params(
            model, weight_quantize_node, include_target=True
        )
        if weight_quantize_params.target is None:
            # weight initializer not included
            continue
        if input_quantize_node.op_type != "DequantizeLinear":
            continue
        if len(conv_node.input) == 3:
            bias_initializer = graph.get_init_by_name(conv_node.input[2])
        else:
            bias_initializer = None
        if bias_initializer is None:
            _LOGGER.debug(f"Matched quantizable Conv weight: {conv_node.name}")
        else:
            _LOGGER.debug(f"Matched quantizable Conv weight and bias: {conv_node.name}")
        # Conversion
        _add_quantized_conv_matmul_add_ops(
            model=model,
            node=conv_node,
            input_quantize_node=input_quantize_node,
            weight_quantize_node=weight_quantize_node,
            input_quantize_params=input_quantize_params,
            weight_quantize_params=weight_quantize_params,
            target_output=conv_node.output[0],
            transpose_weight=False,
            bias_add_name="{}_bias_add".format(conv_node.name),
            bias_initializer=bias_initializer,
        )
        # NOTE(review): this dict is keyed on the input-quantize tensor id, not
        # the weight id as the name suggests — confirm before relying on it
        orig_conv_weight_name_to_node_ids[input_quantize_node.input[0]].append(
            "{}_quant".format(conv_node.output[0])
        )
        # Cleanup
        # delete folded quantization ops
        delete_quant_node(model, weight_dequantize_node)
        delete_quant_node(model, weight_quantize_node)
        # only delete input node if the conv is the only child
        current_graph = ONNXGraph(model)
        if len(current_graph.get_node_children(input_quantize_node)) == 1:
            delete_quant_node(model, input_quantize_node)
        # delete original Conv node
        remove_node_and_params_from_graph(model, conv_node)
        conversion_count += 1
    if conv_nodes:
        _LOGGER.info(
            f"Converted {conversion_count} quantizable Conv ops with weight and bias "
            "to ConvInteger and Add"
        )
    _reduce_qconv_shared_weights(model, orig_conv_weight_name_to_node_ids)
    graph = ONNXGraph(model)
    graph.delete_unused_initializers()
def _convert_quantizable_ops(model: ModelProto, convert_qlinearconv: bool):
    """
    Convert fully-quantized Conv and Gemm ops (input, weight, and output all
    wrapped in quantize/dequantize ops) to quantized operators.

    :param model: ONNX model to modify in place
    :param convert_qlinearconv: set True to convert eligible Conv nodes via
        ``_convert_quantizable_conv``
    """
    quantizable_nodes = [n for n in model.graph.node if n.op_type in ["Conv", "Gemm"]]
    orig_qconv_weight_name_to_node_ids = defaultdict(list)
    for quantizable_node in quantizable_nodes:
        graph = ONNXGraph(model)
        # weight path: input 1 must be DequantizeLinear <- QuantizeLinear
        weight_dequant = graph.get_node_single_parent(quantizable_node, 1)
        if not weight_dequant or weight_dequant.op_type != "DequantizeLinear":
            continue
        weight_quant = graph.get_node_single_parent(weight_dequant, 0)
        if not weight_quant or weight_quant.op_type != "QuantizeLinear":
            continue
        input_quant = graph.get_node_single_parent(quantizable_node, 0)
        if not input_quant or input_quant.op_type not in _QUANTIZE_OP_NAMES:
            continue
        output_quant = graph.get_node_single_child(quantizable_node)
        if not output_quant or output_quant.op_type not in _QUANTIZE_OP_NAMES:
            continue
        if convert_qlinearconv and quantizable_node.op_type == "Conv":
            weight_name = weight_quant.input[0]
            qconv_node = _convert_quantizable_conv(
                model,
                quantizable_node,
                input_quant,
                weight_dequant,
                weight_quant,
                output_quant,
            )
            # track converted convs by original weight so convs sharing a
            # weight can be deduplicated afterwards
            orig_qconv_weight_name_to_node_ids[weight_name].append(qconv_node.output[0])
        if quantizable_node.op_type == "Gemm":
            output_dequant = graph.get_node_single_child(output_quant)
            if output_dequant and output_dequant.op_type in _QUANTIZE_OP_NAMES:
                output_dequant_child = graph.get_node_single_child(output_dequant)
                if output_dequant_child and output_dequant_child.op_type == "Gemm":
                    # output quant is not a QDQ block for the current Gemm Node but,
                    # the input QDQ block for a new Gemm block this Gemm should be
                    # skipped and processed by _convert_quantizable_gemm_no_activations
                    continue
            _convert_quantizable_gemm(
                model,
                quantizable_node,
                input_quant,
                weight_dequant,
                weight_quant,
                output_quant,
            )
    _reduce_qconv_shared_weights(model, orig_qconv_weight_name_to_node_ids)
def _quantize_qat_embedding(model: ModelProto):
    """
    A pass for quantizing qat embeddings

    Starting with:
    |    INPUT    QuantizeLinear (with constant embedding)
    |      |          |
    |      |     DequantizeLinear
    |      |          |
    |         Gather
    |           |
    |    QuantizeLinear (Optional)
    |           |
    |    DequantizeLinear (Optional)
    |           |
    |         OUTPUT

    Converts to:
    |   INPUT
    |     |
    |   Gather(UINT8 data initializer)
    |     |
    |   DequantizeLinear
    |     |
    |   OUTPUT

    :param model: ONNX model to modify in place
    """
    graph = ONNXGraph(model)
    gather_nodes = [node for node in model.graph.node if node.op_type == "Gather"]
    converted_nodes = 0
    for gather_node in gather_nodes:
        # find input quant and dequant nodes
        input_dequant_node = graph.get_node_single_parent(gather_node, 0)
        if not input_dequant_node or input_dequant_node.op_type != "DequantizeLinear":
            continue
        input_quant_node = graph.get_node_single_parent(input_dequant_node, 0)
        if not input_quant_node or input_quant_node.op_type != "QuantizeLinear":
            continue
        # find embedding weights, scale, and zero point
        embedding_initializer = graph.get_init_by_name(input_quant_node.input[0])
        scale_initializer = graph.get_init_by_name(input_quant_node.input[1])
        zp_initializer = graph.get_init_by_name(input_quant_node.input[2])
        if not embedding_initializer or not scale_initializer or not zp_initializer:
            continue
        # quantize embedding
        embedding = numpy_helper.to_array(embedding_initializer)
        scale = numpy_helper.to_array(scale_initializer)
        zero_point = numpy_helper.to_array(zp_initializer)
        embedding_quant = _quantize_array(
            embedding, scale, zero_point, zero_point.dtype
        )
        embedding_quant_initializer = numpy_helper.from_array(
            embedding_quant, name=f"{embedding_initializer.name}_quant"
        )
        # update graph: Gather now reads from the quantized embedding
        model.graph.initializer.append(embedding_quant_initializer)
        gather_node.input[0] = embedding_quant_initializer.name
        # detect QDQ block on output
        output_quant_node = graph.get_node_single_child(gather_node)
        if output_quant_node and output_quant_node.op_type == "QuantizeLinear":
            output_dequant_node = graph.get_node_single_child(output_quant_node)
            qdq_output = (
                output_dequant_node
                and output_dequant_node.op_type == "DequantizeLinear"
            )
        else:
            qdq_output = False
        if qdq_output:
            # forward gather output to dequant input, reusing the embedding's
            # scale and zero point
            output_dequant_node.input[0] = gather_node.output[0]
            output_dequant_node.input[1] = input_quant_node.input[1]
            output_dequant_node.input[2] = input_quant_node.input[2]
            # delete unnecessary quantize and dequantize ops
            delete_quant_node(model, input_quant_node)
            delete_quant_node(model, input_dequant_node)
            delete_quant_node(model, output_quant_node)
        else:
            # use input dequant to dequantize output
            embedding_quant_output_id = f"{gather_node.output[0]}_quant"
            input_dequant_node.input[0] = embedding_quant_output_id
            input_dequant_node.output[0] = gather_node.output[0]
            gather_node.output[0] = embedding_quant_output_id
            delete_quant_node(model, input_quant_node)
        graph.update()
        converted_nodes += 1
    graph.delete_unused_initializers()
    if converted_nodes > 0:
        _LOGGER.info(f"Converted {converted_nodes} QAT embedding ops to UINT8")
def _remove_duplicate_quantize_ops(model: ModelProto):
    """
    Merge QuantizeLinear nodes that share the same input tensor and identical
    quantization parameters: the first node of each group is kept and the
    consumers of the duplicates are rewired to it.

    :param model: ONNX model to modify in place
    """
    # group QuantizeLinear nodes by the tensor they quantize
    quantize_ops_by_input = defaultdict(list)
    for node in model.graph.node:
        if node.op_type == "QuantizeLinear":
            quantize_ops_by_input[node.input[0]].append(node)
    graph = ONNXGraph(model)
    for quantize_op_group in quantize_ops_by_input.values():
        if len(quantize_op_group) == 1:
            continue
        keep_node = quantize_op_group[0]
        keep_node_params = get_quantization_params(graph, keep_node)
        remove_nodes = quantize_op_group[1:]
        for remove_node in remove_nodes:
            remove_node_params = get_quantization_params(graph, remove_node)
            if keep_node_params == remove_node_params:
                # same scale/zero point: reroute consumers to the kept node
                _replace_input_id_model(
                    model, remove_node.output[0], keep_node.output[0]
                )
                delete_quant_node(model, remove_node)
    # cleanup graph
    graph.update()
    graph.delete_unused_initializers()
def _propagate_mobilebert_embedding_quantization(model: ModelProto):
    """
    A pass for propagating embedding quantizations through concat

    Starting with:
    |    GATHER (UINT8 data initializer)
    |      |
    |    DequantizeLinear
    |    |      |      |
    |    |    Slice  Slice
    |    |      |      |
    |    |     Pad    Pad
    |    |      |      |
    |        Concat
    |           |
    |         OUTPUT

    Converts to:
    |    GATHER (UINT8 data initializer)
    |    |      |      |
    |    |    Slice  Slice
    |    |      |      |
    |    |     Pad    Pad
    |    |      |      |
    |        Concat
    |           |
    |    DequantizeLinear
    |           |
    |         OUTPUT

    :param model: ONNX model to modify in place
    """
    converted_nodes = 0
    gather_nodes = [n for n in model.graph.node if n.op_type in ["Gather"]]
    graph = ONNXGraph(model)
    for gather_node in gather_nodes:
        # find quantized weight
        embedding_initializer = graph.get_init_by_name(gather_node.input[0])
        if not embedding_initializer:
            continue
        embedding_array = numpy_helper.to_array(embedding_initializer)
        if embedding_array.dtype not in [numpy.uint8, numpy.int8]:
            continue
        dequant_node = graph.get_node_single_child(gather_node)
        if not dequant_node or dequant_node.op_type != "DequantizeLinear":
            continue
        # loop through the children of the dequantize node and check if they
        # are composed of slice + pad nodes and converge at the same concat node
        valid = True
        concat_node = None
        for branch_node in graph.get_node_children(dequant_node):
            if branch_node.op_type == "Slice":
                pad_node = graph.get_node_single_child(branch_node)
                if not pad_node or pad_node.op_type != "Pad":
                    valid = False
                    break
                concat_node_ = graph.get_node_single_child(pad_node)
                if not concat_node_ or concat_node_.op_type != "Concat":
                    valid = False
                    break
                if concat_node is None:
                    concat_node = concat_node_
                elif concat_node != concat_node_:
                    valid = False
                    break
            elif branch_node.op_type == "Concat":
                if concat_node is None:
                    concat_node = branch_node
                elif branch_node != concat_node:
                    valid = False
                    break
            else:
                valid = False
                break
        if not valid or not concat_node:
            continue
        # switch position of dequantize node
        for branch_node in graph.get_node_children(dequant_node):
            if branch_node.op_type == "Slice":
                zero_point = graph.get_init_by_name(dequant_node.input[2])
                zero_point_array = numpy_helper.to_array(zero_point)
                # slice now reads the raw quantized gather output
                branch_node.input[0] = gather_node.output[0]
                pad_node = graph.get_node_single_child(branch_node)
                # shift the pad constant into the quantized domain so padding
                # still dequantizes to the original pad value
                pad_value = graph.get_init_by_name(pad_node.input[2])
                pad_value_array = numpy_helper.to_array(pad_value)
                pad_value_array = (
                    pad_value_array.astype(zero_point_array.dtype) + zero_point_array
                )
                model.graph.initializer.remove(pad_value)
                pad_value = numpy_helper.from_array(
                    pad_value_array, name=pad_value.name
                )
                model.graph.initializer.append(pad_value)
        # rewire the concat input that came from the dequantize node to the
        # raw gather output
        for id, input_name in enumerate(concat_node.input):
            if input_name == dequant_node.output[0]:
                break
        concat_node.input[id] = gather_node.output[0]
        # move the dequantize node to after the concat by swapping outputs
        temp = concat_node.output[0]
        concat_node.output[0] = dequant_node.output[0]
        dequant_node.output[0] = temp
        dequant_node.input[0] = concat_node.output[0]
        graph.update()
        converted_nodes += 1
    graph.delete_unused_initializers()
    if converted_nodes > 0:
        _LOGGER.info(
            f"Propagated {converted_nodes} DequantizeLinear node(s) through Concat"
        )
def _propagate_through_split(model: ModelProto):
    """
    A pass for propagating dequantization down through a split node
    so if there are quantized operations after the split they can
    be properly converted

    Starting with:
    |     INPUT
    |       |
    | DequantizeLinear
    |       |
    |     Split
    |    |  |  |
    Converts to:
    |     INPUT
    |       |
    |     Split
    |    |  |  |
    | one DequantizeLinear per consumed Split output

    :param model: ONNX model to modify in place
    """
    new_nodes = []  # DequantizeLinear nodes created below, appended at the end
    to_remove = []  # original DequantizeLinear nodes to delete after rewiring
    split_nodes = [n for n in model.graph.node if n.op_type in ["Split"]]
    graph = ONNXGraph(model)
    for split_node in split_nodes:
        # only rewrite splits fed directly by a single DequantizeLinear parent
        dequant_node = graph.get_node_single_parent(split_node, 0)
        if not dequant_node or dequant_node.op_type != "DequantizeLinear":
            continue
        # Make input to dequantize linear node input to split node
        split_node.input[0] = dequant_node.input[0]
        # For every output of split create a dequantize linear node
        dequant_id = 0
        for other_node in get_node_output_nodes(model, split_node):
            # collect the split outputs this consumer actually reads
            split_node_output = []
            for out in split_node.output:
                if out in other_node.input:
                    split_node_output.append(out)
            for out in split_node_output:
                dequant_node_name = split_node.name + f"_dequant.{dequant_id}"
                dequant_id += 1
                dequant_node_output = dequant_node_name + "_output"
                # new DequantizeLinear reuses the original scale and zero point
                new_nodes.append(
                    onnx.helper.make_node(
                        "DequantizeLinear",
                        [
                            out,  # input
                            dequant_node.input[1],  # scale
                            dequant_node.input[2],  # zero point
                        ],
                        [dequant_node_output],
                        dequant_node_name,
                    )
                )
                # rewire the first consumer input matching this split output
                # to read from the new dequantize node instead
                for other_node_input_index, other_node_input in enumerate(
                    other_node.input
                ):
                    if other_node_input == out:
                        break
                other_node.input[other_node_input_index] = dequant_node_output
        to_remove.append(dequant_node)
    model.graph.node.extend(new_nodes)
    for node in to_remove:
        model.graph.node.remove(node)
    if len(to_remove) > 0:
        _LOGGER.info(
            f"Propagated {len(to_remove)} DequantizeLinear node(s) through Split"
        )
The provided code snippet includes necessary dependencies for implementing the `quantize_torch_qat_export` function. Write a Python function `def quantize_torch_qat_export( model: Union[ModelProto, str], output_file_path: Union[str, None] = None, inplace: bool = True, use_qlinearconv: bool = False, ) -> ModelProto` to solve the following problem:
:param model: The model to convert, or a file path to it :param output_file_path: File path to save the converted model to :param inplace: If true, does conversion of model in place. Default is true :param use_qlinearconv: Set True to use legacy QLinearConv format instead of ConvInteger. QLinearConv requires output activations be quantized in the quantization recipe. (This was the default behavior prior to sparseml 0.12). Default is False :return: Converts a model exported from a torch QAT session from a QAT graph with fake quantize ops surrounding operations to a quantized graph with quantized operations. All quantized Convs and FC inputs and outputs will be surrounded by fake quantize ops
Here is the function:
def quantize_torch_qat_export(
    model: Union[ModelProto, str],
    output_file_path: Union[str, None] = None,
    inplace: bool = True,
    use_qlinearconv: bool = False,
) -> ModelProto:
    """
    Converts a model exported from a torch QAT session from a QAT graph with
    fake quantize ops surrounding operations to a quantized graph with quantized
    operations. All quantized Convs and FC inputs and outputs will be surrounded
    by fake quantize ops

    :param model: The model to convert, or a file path to it
    :param output_file_path: File path to save the converted model to
    :param inplace: If true, does conversion of model in place. Default is true
    :param use_qlinearconv: Set True to use legacy QLinearConv format instead
        of ConvInteger. QLinearConv requires output activations be quantized
        in the quantization recipe. (This was the default behavior prior to
        sparseml 0.12). Default is False
    :return: the converted model
    """
    if isinstance(model, str):
        model = onnx.load(model)
    if not inplace:
        # work on a copy so the caller's model is left untouched
        model = deepcopy(model)
    # NOTE: pass ordering matters here - normalization/folding passes run
    # first, conversion passes next, and cleanup passes last
    _convert_single_constants_to_initializers(model)
    _fold_qat_conv_bns(model)
    _delete_repeated_qat_blocks(model)
    _quantize_qat_embedding(model)
    _propagate_mobilebert_embedding_quantization(model)
    _propagate_through_split(model)
    _convert_quantizable_matmul(model)
    _convert_quantizable_matmul_and_add(model)
    _fold_relu_quants(model)
    # only convert to either ConvInteger or QLinearConv (legacy)
    if not use_qlinearconv:
        _convert_quantizable_conv_integer(model)
    _convert_quantizable_ops(model, convert_qlinearconv=use_qlinearconv)
    _convert_quantizable_gemm_no_activations(model)
    quantize_resnet_identity_add_inputs(model)
    _remove_duplicate_quantize_ops(model)
    _convert_signed_to_unsigned(model)
    # final cleanup: restore topological node order and drop orphaned initializers
    graph = ONNXGraph(model)
    graph.sort_nodes_topologically()
    graph.delete_unused_initializers()
    if output_file_path:
        save_onnx(model, output_file_path)
    return model
import logging
from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict, List, NamedTuple, Optional, Union
import numpy
import onnx
import torch
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import (
ONNXGraph,
get_batch_norm_params,
get_init_by_name,
get_node_attributes,
get_node_output_nodes,
quantize_resnet_identity_add_inputs,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparsezoo.utils import save_onnx
def _skip_input_quantize(model: ModelProto) -> Optional[str]:
    """
    Delete the QuantizeLinear node(s) that directly consume the graph input and
    retype the input to uint8 so pre-quantized inputs can be fed directly.

    :param model: ONNX model to modify in place
    :return: None on success, otherwise a string describing why the graph
        could not be modified
    """
    if (
        len(model.graph.input) != 1
        or model.graph.input[0].type.tensor_type.elem_type != 1  # 1 == FLOAT
    ):
        # more than 1 input or input is not FP32
        return (
            "Not modifying ONNX graph inputs - either graph has more than one "
            "input or input type is not FP32"
        )
    input_node = model.graph.input[0]
    input_children = [
        node for node in model.graph.node if input_node.name in node.input
    ]
    if not all(node.op_type == "QuantizeLinear" for node in input_children):
        # trailing space after "follow" is required so the implicit string
        # concatenation reads "may follow the", not "followthe"
        return (
            "Not modifying ONNX graph inputs - only QuantizeLinear nodes may follow "
            "the FP32 input tensor in original graph, prior to converting to uint8"
        )
    _delete_quantize_nodes(ONNXGraph(model), input_children)
    input_node.type.tensor_type.elem_type = 2  # fp32 -> uint8 (2 == UINT8)
    _LOGGER.info("Model initial QuantizeLinear node(s) deleted and inputs set to uint8")
    return None
def _skip_trivially_nested_input_quantize(model: ModelProto) -> bool:
    """
    Convert: input -> (some series of slices and concats) -> QuantizeLinear -> Any
    to: input[uint8] -> (some series of slices and concats) -> Any

    :param model: ONNX model to modify in place
    :return: True if the rewrite was applied, False otherwise
    """
    graph_inputs = model.graph.input
    if len(graph_inputs) != 1 or graph_inputs[0].type.tensor_type.elem_type != 1:
        # more than one graph input, or the single input is not FP32
        return False
    input_node = graph_inputs[0]
    pending = [node for node in model.graph.node if input_node.name in node.input]
    trivial_ops = {"Concat", "Slice"}
    graph = ONNXGraph(model)
    quantize_hits = []
    # breadth-first walk from the input through trivial nodes only
    while pending:
        current = pending.pop(0)
        if current.op_type == "QuantizeLinear":
            quantize_hits.append(current)
            continue
        if current.op_type not in trivial_ops:
            # non-trivial node encountered before QuantizeLinear; stop walking
            break
        pending.extend(graph.get_node_children(current))
    if pending or not quantize_hits:
        # walk aborted early, or no QuantizeLinear was reached
        return False
    reference = quantize_hits[0]
    if any(node != reference for node in quantize_hits):
        # multiple distinct QuantizeLinear nodes found - cannot fold into one
        return False
    _delete_quantize_nodes(graph, [reference])
    input_node.type.tensor_type.elem_type = 2  # fp32 -> uint8
    _LOGGER.info("Model initial QuantizeLinear node(s) deleted and inputs set to uint8")
    return True
The provided code snippet includes necessary dependencies for implementing the `skip_onnx_input_quantize` function. Write a Python function `def skip_onnx_input_quantize( model: Union[ModelProto, str], output_file_path: Union[str, None] = None, )` to solve the following problem:
If the given model has a single FP32 input that feeds into a QuantizeLinear node, then the input will be changed to uint8 and the QuantizeLinear node will be deleted. This enables quantize graphs to take quantized inputs instead of floats. If no optimization is made, a RuntimeError will be raised. :param model: The model to convert, or a file path to it :param output_file_path: File path to save the converted model to
Here is the function:
def skip_onnx_input_quantize(
    model: Union[ModelProto, str],
    output_file_path: Union[str, None] = None,
):
    """
    If the given model has a single FP32 input that feeds into a QuantizeLinear
    node, change the input to uint8 and delete the QuantizeLinear node so the
    graph can take quantized inputs directly. Raises a RuntimeError if neither
    the direct nor the trivially-nested rewrite applies.

    :param model: The model to convert, or a file path to it
    :param output_file_path: File path to save the converted model to
    """
    if isinstance(model, str):
        model = onnx.load(model)
    error_message = _skip_input_quantize(model)
    if error_message is not None:
        # direct rewrite failed; try the nested slice/concat pattern instead
        if not _skip_trivially_nested_input_quantize(model):
            raise RuntimeError(error_message)
    if output_file_path:
        save_onnx(model, output_file_path)
from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.pytorch.sparsification.quantization.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.pytorch.sparsification.quantization.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationScheme,
)
from sparseml.pytorch.utils import get_layer
def is_quantizable_module(
    module: Module,
    exclude_module_types: Optional[List[str]] = None,
) -> bool:
    """
    :param module: module to check
    :param exclude_module_types: string names of modules to not include for
        quantization. Default None
    :return: True if the module is quantizable. A module is quantizable when
        its type name is not in exclude_module_types or
        NON_QUANTIZABLE_MODULE_NAMES and it is a torch fused module, a leaf
        module (all children are QAT helpers), or a QuantWrapper
    """
    # build the full exclusion set: caller-provided names plus defaults
    excluded = set(exclude_module_types or [])
    excluded.update(NON_QUANTIZABLE_MODULE_NAMES)
    type_name = module.__class__.__name__
    if type_name in excluded:
        return False
    if type_name in FUSED_MODULE_NAMES:
        return True
    # leaf modules (no children) pass trivially via all([]) == True
    if all(is_qat_helper_module(child) for child in module.children()):
        return True
    return isinstance(module, torch_quantization.QuantWrapper)
def _match_submodule_name_or_type(
submodule: Module, submodule_name: str, names_or_types: List[str]
) -> Optional[str]:
# match preferences:
# 1. match the submodule prefix (longest first)
# 2. match module type name
submodule_match = ""
for name_or_type in names_or_types:
name_to_compare = submodule_name[:]
if name_to_compare.startswith("module."):
name_to_compare = name_to_compare[7:]
if name_to_compare.startswith(name_or_type) and (
len(name_or_type) > len(submodule_match)
):
# match to most specific submodule name
submodule_match = name_or_type
# If didn't find prefix, try to match to match type
if not submodule_match:
for name_or_type in names_or_types:
if name_or_type == submodule.__class__.__name__:
# type match, return type name
return name_or_type
return submodule_match or None # return None if no match
def _inject_qat_wrapper(
    root_module: Module,
    target_submodule_name: str,
    quantization_scheme: QuantizationScheme,
):
    """
    Replace the named submodule of root_module with a QATWrapper built from it
    and the given quantization scheme.
    """
    *parent_parts, leaf_name = target_submodule_name.split(".")
    parent_module = get_layer(".".join(parent_parts), root_module)
    original = getattr(parent_module, leaf_name)
    setattr(
        parent_module,
        leaf_name,
        QATWrapper.from_module(original, quantization_scheme),
    )
def _validate_set_module_schemes(
model: Module,
scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None,
ignore: Optional[List[str]] = None,
):
def _get_unmatched_types_or_names(types_or_names):
unmatched = []
for type_or_name in types_or_names:
matched = False
for submodule_name, submodule in model.named_modules():
name_to_compare = submodule_name[:]
if name_to_compare.startswith("module."):
name_to_compare = name_to_compare[7:]
if name_to_compare.startswith(type_or_name) or (
submodule.__class__.__name__ == type_or_name
):
matched = True
break
if not matched:
unmatched.append(type_or_name)
return unmatched
def _build_error_str(property_name, unmatched_values):
return (
f"{property_name} contains submodule names or module types "
"that do not match to any submodules in the model. "
f"unmatched values: {unmatched_values}"
)
unmatched_scheme_overrides = _get_unmatched_types_or_names(scheme_overrides)
if unmatched_scheme_overrides:
raise ValueError(
_build_error_str("scheme_overrides", unmatched_scheme_overrides)
)
unmatched_ignore = _get_unmatched_types_or_names(ignore)
if unmatched_ignore:
raise ValueError(_build_error_str("ignore", unmatched_ignore))
class QuantizationScheme(BaseModel):
    """
    Class composed of QuantizationArgs to build QConfig and QuantWrapper objects for
    quantizing models. Provides a simple user interface for defining how inputs,
    weights, and outputs should be quantized
    """

    def __init__(self, *args, **kwargs):
        # support for loading from yaml str - "null" values become None so
        # serialized schemes round-trip through __str__ correctly
        args = [arg if arg != "null" else None for arg in args]
        for key, val in kwargs.items():
            if val == "null":
                kwargs[key] = None
        super().__init__(*args, **kwargs)

    input_activations: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_activation_args,
        description=(
            "target quantization setting for input activations. Set to None to "
            "not quantize input activations. Default is 8 bits asymmetric"
        ),
    )
    weights: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_weight_args,
        description=(
            "target quantization setting for model weights. Set to None to "
            "not quantize weights. Default is 8 bits symmetric"
        ),
    )
    output_activations: Optional[QuantizationArgs] = Field(
        default=None,
        description=(
            "target quantization setting for output activations. Set to None to "
            "not quantize output activations. Default is None"
        ),
    )
    target_hardware: Optional[str] = Field(
        default=None,
        description=(
            "target deployment runtime/hardware name to be set by default "
            "classmethods. Default is None"
        ),
    )

    @classmethod
    def load(
        cls,
        scheme: QuantizationSchemeLoadable,
        default: Optional["QuantizationScheme"] = None,
    ) -> "QuantizationScheme":
        """
        :param scheme: QuantizationScheme, dict representation of scheme,
            or string alias of a scheme to load. Valid strings:
            ['default', 'deepsparse', 'tensorrt']
        :param default: default QuantizationScheme to override 'default' scheme
            with
        :return: constructed QuantizationScheme object from the given scheme;
            if given a dict, returns QuantizationScheme.parse_obj(scheme), string
            input will return the default QuantizationScheme if set to 'default'.
        """
        if isinstance(scheme, cls):
            return scheme
        elif scheme is None or scheme == "default":
            # if no default override, defaults to QuantizationScheme()
            return deepcopy(default) or cls()
        elif isinstance(scheme, str):
            if scheme == "deepsparse":
                return cls.deepsparse()
            elif scheme == "tensorrt":
                return cls.tensorrt()
            raise ValueError(
                f"Unrecognized QuantizationScheme string alias {scheme}. "
                "Valid strings: ['default', 'deepsparse', 'tensorrt']"
            )
        elif isinstance(scheme, dict):
            # default to dict
            scheme = {key: _parse_quantization_arg(arg) for key, arg in scheme.items()}
            return cls.parse_obj(scheme)
        else:
            raise ValueError(
                f"Unrecognized type {type(scheme)} for QuantizationScheme.load, "
                "expected one of: [QuantizationScheme, Dict, str, None]"
            )

    @classmethod
    def deepsparse(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for deepsparse targeted deployments -
            int8, symmetric weights, asymmetric inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=False),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="deepsparse",
        )

    @classmethod
    def tensorrt(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for tensorrt targeted deployments -
            compatibility with explicit quantization as supported by TensorRT 8.2:
            int8, symmetric for both weights and inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=True),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="tensorrt",
        )

    def get_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for Modules (output activations used,
            use QuantWrapper for inputs)
        """
        qconfig = _get_qconfig(self.output_activations, self.weights)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def get_wrapper_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for QuantWrapper objects (input activations used)
        """
        qconfig = _get_qconfig(self.input_activations, None)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def __str__(self) -> str:
        """
        :return: YAML friendly string serialization
        """
        dict_repr = self.dict()
        dict_repr = {
            key: val if val is not None else "null" for key, val in dict_repr.items()
        }
        return str(dict_repr)
The provided code snippet includes necessary dependencies for implementing the `set_quantization_schemes` function. Write a Python function `def set_quantization_schemes( model: Module, scheme: QuantizationScheme, scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None, ignore: Optional[List[str]] = None, strict: bool = True, )` to solve the following problem:
Sets an appropriate `quantization_scheme` to targeted quantizable submodules :param model: module to attach QuantizationSchemes to :param scheme: default scheme to add to a target module unless overwritten by another scheme :param scheme_overrides: dictionary of module type names or submodule names mapped to a quantization scheme to override with. If a submodule matches to multiple submodule overrides and/or a module type, module type will take the highest priority followed by the longest matched submodule name :param ignore: string names of modules type names or submodule names to not include for quantization. Default None :param strict: if True, will raise an error if any module types or submodules in scheme_overrides or ignore are not found in the given module. Default True
Here is the function:
def set_quantization_schemes(
    model: Module,
    scheme: QuantizationScheme,
    scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None,
    ignore: Optional[List[str]] = None,
    strict: bool = True,
):
    """
    Attach a `quantization_scheme` attribute to each targeted quantizable
    submodule of model.

    :param model: module to attach QuantizationSchemes to
    :param scheme: default scheme applied to a target module unless an
        override matches it
    :param scheme_overrides: dictionary of module type names or submodule names
        mapped to a quantization scheme to override with. If a submodule matches
        multiple submodule overrides and/or a module type, the module type takes
        the highest priority followed by the longest matched submodule name
    :param ignore: module type names or submodule names to exclude from
        quantization. Default None
    :param strict: if True, raise an error when any entry in scheme_overrides
        or ignore matches no submodule of model. Default True
    """
    scheme_overrides = scheme_overrides or {}
    if strict:
        _validate_set_module_schemes(model, scheme_overrides, ignore)
    # QATWrapper injections are collected during the walk and applied after it
    # so the module tree is not mutated while named_modules() iterates
    qat_wrap_targets = {}  # type: Dict[str, QuantizationScheme]
    for name, submodule in model.named_modules():
        if ignore and _match_submodule_name_or_type(submodule, name, ignore):
            # submodule type or graph section explicitly excluded; skip
            continue
        override_key = _match_submodule_name_or_type(
            submodule, name, scheme_overrides
        )
        if override_key is None:
            target_scheme = scheme
        else:
            target_scheme = scheme_overrides[override_key]
        type_overridden = override_key == submodule.__class__.__name__
        if getattr(submodule, "wrap_qat", False):
            # a wrap_qat attribute takes precedence over default scheme handling
            qat_wrap_targets[name] = target_scheme
        elif type_overridden or is_quantizable_module(submodule):
            # base quantizable module or a user-targeted module type
            submodule.quantization_scheme = target_scheme
    for target_name, target_scheme in qat_wrap_targets.items():
        _inject_qat_wrapper(model, target_name, target_scheme)
from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.pytorch.sparsification.quantization.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.pytorch.sparsification.quantization.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationScheme,
)
from sparseml.pytorch.utils import get_layer
# torch quantization support is only available in some torch builds; fall back
# to None so importing this module never fails - callers are expected to check
# for availability before using these names
try:
    from torch import quantization as torch_quantization
    from torch.nn import intrinsic as torch_intrinsic
except Exception:
    torch_quantization = None
    torch_intrinsic = None
def set_qconfigs_from_quantization_schemes(module: Module):
    """
    Set `qconfig` on the module and every submodule that carries a
    quantization_scheme attribute.

    :param module: root module to update in place
    """
    for submodule in module.modules():
        if not hasattr(submodule, "quantization_scheme"):
            continue
        # the scheme may be stored as a dict or string alias; normalize it
        loaded_scheme = QuantizationScheme.load(submodule.quantization_scheme)
        if isinstance(submodule, torch_quantization.QuantWrapper):
            wrapper_qconfig = loaded_scheme.get_wrapper_qconfig()
            submodule.qconfig = wrapper_qconfig
            submodule.quant.qconfig = wrapper_qconfig
        else:
            submodule.qconfig = loaded_scheme.get_qconfig()
def add_input_activation_quant_wrappers(module: Module) -> Module:
    """
    Wrap submodules whose quantization scheme targets input activations in
    torch QuantWrapper objects.

    :param module: module to add input activation QuantWrappers for
    :return: the updated module - the return value must be used since the
        top-level module itself may be replaced by a wrapper
    """
    scheme = getattr(module, "quantization_scheme", None)
    wants_input_quant = (
        scheme is not None
        and scheme.input_activations is not None
        and not isinstance(module, torch.nn.quantized.FloatFunctional)
    )
    if wants_input_quant:
        # wrap and carry the scheme over to the wrapper; no recursion below a
        # wrapped block - nested children are assumed not to need their own
        # input activation quantization
        wrapped = torch_quantization.QuantWrapper(module)
        wrapped.quantization_scheme = scheme
        return wrapped
    # otherwise recurse into children, re-assigning any wrapped results
    for name, child in module.named_children():
        setattr(module, name, add_input_activation_quant_wrappers(child))
    return module
def add_output_activation_observers(module: Module):
    """
    implementation of torch.quantization add_observers_ that only adds observers
    according to attached quantization_scheme properties. the existing implementation
    (1.9+) includes its own logic for propagating including overriding set qconfigs
    for certain activations without the ability to disable this behavior

    :param module: module to add output activation observers to
    """
    # adapted from torch/ao/quantization/quantize.py::_add_observer_
    # source: https://github.com/pytorch/pytorch/blob/v1.13.0/torch/ao/quantization/quantize.py#L135  # noqa: E501
    try:
        device = next(module.parameters()).device
    except StopIteration:
        # default to CPU if module has no parameters
        device = "cpu"
    def _needs_observer(target_module: Module):
        # combines logic from multiple places of original implementation which
        # mostly checked for existence of a qconfig and if the target was a leaf
        # module
        if not hasattr(target_module, "quantization_scheme") or isinstance(
            target_module, torch_quantization.QuantWrapper
        ):
            # submodule not targeted for quantization, already has attached
            # output observer, or is QuantWrapper (quant wrapper delegates to children)
            return False
        if hasattr(target_module, "activation_post_process"):
            # activation post process is set, only mark for potential overriding
            # if it is an identity (this comes up when the property is set for
            # later overriding such as FloatFunctional
            return isinstance(target_module.activation_post_process, Identity)
        for descendent_module in target_module.modules():
            if descendent_module is target_module:
                continue  # skip itself
            descendent_scheme = getattr(descendent_module, "quantization_scheme", None)
            if descendent_scheme is not None and (
                descendent_scheme.output_activations is not None
            ):
                # a descendent of this module targets output activations, return False
                return False
        # module has a quantization scheme and no descendents track output activations
        return True
    def _observer_forward_hook(self, inp, output):
        # reference for output activation observer hook to register
        # (self here is the hooked module, per register_forward_hook convention)
        return self.activation_post_process(output)
    def _add_activation_post_process(target_module: Module):
        # get output observer
        # NOTE: reads `submodule` from the enclosing loop; at every call site
        # target_module and submodule are the same object
        output_observer = submodule.qconfig.activation()
        output_observer.to(device)
        # add an activation post process module
        target_module.add_module("activation_post_process", output_observer)
        # add hook to call observer after output activation has been returned
        # hook is moved to the front so it runs before any other forward hooks
        handle = target_module.register_forward_hook(_observer_forward_hook)
        target_module._forward_hooks.move_to_end(handle.id, last=False)
    for submodule in module.modules():
        if not _needs_observer(submodule):
            # submodule not targeted for quantization, already has attached
            # output observer, or has a descendent that tracks output activations
            continue
        # extract qconfig and observer from qconfig
        if not hasattr(submodule, "qconfig"):
            # set qconfig from scheme if not already set
            set_qconfigs_from_quantization_schemes(submodule)
            assert hasattr(submodule, "qconfig")
        # create observer, add as child module, and register hook to call
        _add_activation_post_process(submodule)
def _reattach_quantization_schemes(module: Module):
# after torch.prepare_qat is called, quantization scheme properties may be lost
# due to transfer of base module classes to their QAT implementations
# this function uses the reference to the quantization_scheme in the qconfig
# to potentially re-attach the scheme
for submodule in module.modules():
qconfig = getattr(submodule, "qconfig", None)
if not qconfig or hasattr(submodule, "quantization_scheme"):
# no qconfig, or scheme already set
continue
quantization_scheme = getattr(qconfig, "quantization_scheme", None)
if not quantization_scheme:
continue
submodule.quantization_scheme = quantization_scheme
def _get_qat_module_mappings() -> Dict[Module, Module]:
mappings = torch_quantization.quantization_mappings
if not hasattr(mappings, "get_default_qat_module_mappings"):
# legacy
return mappings.get_qat_module_mappings()
# latest
return mappings.get_default_qat_module_mappings()
def configure_module_default_qconfigs(module: Module):
    """
    if any submodule of the given module has a configure_qconfig function,
    configure_qconfig will be called on that submodule to set the qconfig(s)
    of that module to its default

    :param module: module to set qconfigs for
    """
    for submodule in module.modules():
        configure = getattr(submodule, "configure_qconfig", None)
        if callable(configure):
            configure()
def prepare_embeddings_qat(
    module: Module,
    qproperties: Optional[QConfigProperties] = None,
    qconfig: Optional["torch.quantization.QConfig"] = None,
):
    """
    Add a fake quantize call to the weights of every Embedding submodule of
    the given module. Per submodule, the qconfig resolution order is:
    submodule.qconfig -> qconfig argument -> qconfig built from qproperties.

    :param module: module to run QAT for the embeddings of
    :param qproperties: properties used to build a QConfig when the qconfig
        argument is not given
    :param qconfig: fallback qconfig used when a submodule has none of its own
    """
    if qconfig is None and qproperties is not None:
        # embeddings use asymmetric weight quantization
        qproperties.symmetric_weights = False
        qconfig = get_qat_qconfig(qproperties)
    for submodule in module.modules():
        if not isinstance(submodule, Embedding):
            continue
        effective_qconfig = getattr(submodule, "qconfig", None) or qconfig
        if effective_qconfig is not None:
            _prepare_qat_embedding(submodule, effective_qconfig)
The provided code snippet includes necessary dependencies for implementing the `convert_module_qat_from_schemes` function. Write a Python function `def convert_module_qat_from_schemes(module: Module)` to solve the following problem:
Converts submodules with set quantization_schemes into quantization aware modules with FakeQuantize modules in the model :param module: module to convert to QAT mode
Here is the function:
def convert_module_qat_from_schemes(module: Module):
    """
    Converts submodules with set quantization_schemes into quantization aware
    modules with FakeQuantize modules in the model

    :param module: module to convert to QAT mode
    """
    # inject necessary QuantWrappers into the module to apply QAT to
    # targeted layer input activations
    # NOTE(review): the wrapped result is used for the remaining steps but not
    # returned; if the top-level module itself gets wrapped, the caller's
    # reference will not include the wrapper - confirm intended
    module = add_input_activation_quant_wrappers(module)
    # set appropriate qconfig properties in submodules
    set_qconfigs_from_quantization_schemes(module)
    # override any qconfigs set in `configure_qconfigs` function
    configure_module_default_qconfigs(module)
    # set modules with proper qconfigs to QAT mode
    convert_kwargs = (
        dict(convert_custom_config_dict={})  # do not let torch override any qconfigs
        if version.parse(torch.__version__) >= version.parse("1.8.0")
        else {}
    )
    torch_quantization.convert(
        module,
        mapping=_get_qat_module_mappings(),
        inplace=True,
        remove_qconfig=False,
        **convert_kwargs,
    )
    # re-attach any quantization schemes lost during conversion
    _reattach_quantization_schemes(module)
    # add observers for output activations
    add_output_activation_observers(module)
    # manual pass to convert relevant Embedding layers
    prepare_embeddings_qat(module)
21,224 | from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.pytorch.sparsification.quantization.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.pytorch.sparsification.quantization.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.pytorch.sparsification.quantization.quantization_scheme import (
QuantizationScheme,
)
from sparseml.pytorch.utils import get_layer
The provided code snippet includes necessary dependencies for implementing the `raise_if_torch_quantization_not_available` function. Write a Python function `def raise_if_torch_quantization_not_available()` to solve the following problem:
Raise a RuntimeError if the installed torch version does not include support for quantization-aware training.
Here is the function:
def raise_if_torch_quantization_not_available():
    """
    Ensure the installed torch build ships the quantization packages.

    :raises: RuntimeError if the installed torch version does not include
        support for quantization aware training
    """
    quantization_packages = (torch_quantization, torch_intrinsic)
    if any(package is None for package in quantization_packages):
        raise RuntimeError(
            "Unable to import package torch.quantization and/or "
            "torch.nn.intrinsic. "
            "Try upgrading your PyTorch version to use the QuantizationModifier."
        )
21,225 | from copy import deepcopy
from functools import partial
from typing import Any, Dict, Optional, Union
import torch
from packaging import version
from pydantic import BaseModel, Field, validator
from torch.nn import Identity
class QuantizationArgs(BaseModel):
    """
    Class representing user facing arguments to define quantization Observers of
    activations or weights in a network
    """

    num_bits: int = Field(
        default=8, description="number of bits to target for quantization"
    )
    symmetric: bool = Field(
        default=False,
        description="set True to use symmetric quantization. Default False",
    )
    strategy: str = Field(
        default="tensor",
        description=(
            "scope of the quantization to be applied. can be 'tensor' or 'channel'"
        ),
    )
    kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description=(
            "optional dict of kwargs to be passed directly to torch quantization "
            "Observers constructor excluding quantization range or symmetry"
        ),
    )

    # FIX(review): these alternate constructors take ``cls`` and must be
    # classmethods — without the decorator they would receive an instance
    @classmethod
    def default_activation_args(cls):
        """
        :return: default 8 bits asymmetric settings
        """
        return cls(num_bits=8, symmetric=False)

    @classmethod
    def default_weight_args(cls):
        """
        :return: default 8 bits symmetric settings
        """
        return cls(num_bits=8, symmetric=True)

    def get_observer(self) -> "torch.quantization.FakeQuantize":
        """
        :return: torch quantization FakeQuantize built based on these QuantizationArgs
        """
        return get_observer(
            symmetric=self.symmetric,
            strategy=self.strategy,
            dtype=torch.qint8,
            bits=self.num_bits,
            reduce_range=self.kwargs.get("reduce_range", False),
            qconfig_kwargs=self.kwargs,
        )

    # FIX(review): pydantic field validators only run when registered with the
    # ``validator`` decorator; without it the strategy check was dead code
    @validator("strategy")
    def validate_strategy(cls, value):
        """
        :raises ValueError: if ``strategy`` is not a supported scope
        """
        valid_scopes = ["tensor", "channel"]
        if value not in valid_scopes:
            raise ValueError(f"`strategy` must be one of {valid_scopes}, got {value}")
        return value
def get_observer(
    symmetric: bool,
    strategy: str,
    dtype: torch.dtype,
    bits: int,
    reduce_range: bool,
    qconfig_kwargs: Dict[str, Any],
):
    """
    Build a FakeQuantize factory (``with_args`` partial) for the requested scheme.

    :param symmetric: True for a symmetric qscheme, False for affine
    :param strategy: "channel" selects per-channel observers; any other value
        falls back to per-tensor observers
    :param dtype: target torch quantized dtype (e.g. torch.qint8)
    :param bits: bit width used to compute the quantization range
    :param reduce_range: forwarded to the underlying torch observer
    :param qconfig_kwargs: extra kwargs merged last, overriding computed values
    """
    quant_min, quant_max, is_custom_qrange = compute_range(dtype, bits)
    if strategy == "channel":
        qscheme = torch.per_channel_symmetric if symmetric else torch.per_channel_affine
        observer_cls = torch_quantization.MovingAveragePerChannelMinMaxObserver
        observer_kwargs = dict(
            ch_axis=0,  # observe along the leading (output-channel) axis
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
        )
    else:  # default to tensor strategy
        qscheme = torch.per_tensor_symmetric if symmetric else torch.per_tensor_affine
        observer_cls = torch_quantization.MovingAverageMinMaxObserver
        observer_kwargs = dict(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
        )
    """
    in torch 1.9.1, quant_min and quant_max are not passed to observer:
    https://github.com/pytorch/pytorch/blob/v1.9.1/torch/quantization/fake_quantize.py#L109
    however in 1.12.0, this is fixed so both are passed to observer:
    https://github.com/pytorch/pytorch/blob/v1.12.1/torch/ao/quantization/fake_quantize.py#L132
    Passing quant_min/quant_max to observer means the observer will have
    `self.has_customized_qrange == True` in both 1.9.1 and 1.12.0.
    For whatever reason, both versions calculate zero point for
    quint8 differently **if there is a customized_qrange**
    1. customized qrange has zero point of 127
    2. non-customized has zero point of 128.
    source:
    https://github.com/pytorch/pytorch/blob/v1.12.1/torch/ao/quantization/observer.py#L293
    **we want to ensure that the zero point is 128**
    see https://github.com/neuralmagic/sparseml/pull/604
    """
    if is_custom_qrange:
        # for both versions we need to include the custom min/max values in kwargs
        observer_kwargs["quant_min"] = quant_min
        observer_kwargs["quant_max"] = quant_max
        if _TORCH_PRE_112:
            # pre 1.12, the observer doesn't get passed the quant_min/quant_max values,
            # so we patch them in to the constructor of the observer
            observer_cls = partial(
                observer_cls, quant_min=quant_min, quant_max=quant_max
            )
    else:
        # if using a non custom qrange, we can rely on default values used by
        # the observers
        if _TORCH_PRE_112:
            # pre 1.12, the observer doesn't get passed the quant_min/quant_max values,
            # so we are safe to pass these to FakeQuantize
            observer_kwargs["quant_min"] = quant_min
            observer_kwargs["quant_max"] = quant_max
        else:
            # post 1.12 we cannot pass them to the observer since that will set
            # has_customized_qrange. instead we rely on the default values
            # being equal to the `quant_min` and `quant_max` here.
            pass
    observer_kwargs["observer"] = observer_cls
    observer_kwargs.update(qconfig_kwargs or {})
    observer = torch_quantization.FakeQuantize.with_args(
        **observer_kwargs,
    )
    return observer
def _get_qconfig(
    activation_args: Optional[QuantizationArgs], weight_args: Optional[QuantizationArgs]
) -> "torch.quantization.QConfig":
    """
    Build a torch QConfig from optional activation/weight argument objects;
    a missing side falls back to the Identity module (no fake quantization).
    """
    if activation_args:
        activation_observer = activation_args.get_observer()
    else:
        activation_observer = Identity
    if weight_args:
        weight_observer = weight_args.get_observer()
    else:
        weight_observer = Identity
    return torch_quantization.QConfig(
        activation=activation_observer, weight=weight_observer
    )
21,226 | from copy import deepcopy
from functools import partial
from typing import Any, Dict, Optional, Union
import torch
from packaging import version
from pydantic import BaseModel, Field, validator
from torch.nn import Identity
def _parse_quantization_arg(arg: Any):
if arg == "None":
return None
return arg | null |
21,227 | import logging
import warnings
from itertools import cycle
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
import torch
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.pytorch.sparsification.modifier import ScheduledModifier
from sparseml.pytorch.sparsification.quantization.helpers import (
CONV_ACTIVATION_NAMES,
LINEAR_ACTIVATION_NAMES,
QConfigProperties,
add_quant_dequant,
configure_module_bn_wrappers,
configure_module_default_qconfigs,
configure_module_qat_wrappers,
freeze_bn_stats,
fuse_module_conv_bn_relus,
get_qat_qconfig,
prepare_embeddings_qat,
remove_activation_qat_by_layer_name,
)
from sparseml.pytorch.utils import BaseLogger, tensors_module_forward, tensors_to_device
from sparseml.sparsification import SparsificationTypes
def _clear_null_qconfigs(model: Module):
for submodule in model.modules():
if hasattr(submodule, "qconfig") and submodule.qconfig is None:
del submodule.qconfig | null |
21,228 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
def create_grad_sampler_loader(
    train_dataset, num_workers=16, grad_sampler_batch_size=10
):
    """
    Build a shuffled, pinned-memory DataLoader used to sample gradients.

    :param train_dataset: dataset to draw gradient-sample batches from
    :param num_workers: number of DataLoader worker processes
    :param grad_sampler_batch_size: batch size for the sampled batches
    :return: the configured torch DataLoader
    """
    loader_kwargs = dict(
        batch_size=grad_sampler_batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=num_workers,
    )
    return DataLoader(train_dataset, **loader_kwargs)
21,229 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
_LOGGER = logging.getLogger(__name__)
def evaluate(
    model,
    criterion,
    data_loader,
    device,
    print_freq=100,
    log_suffix="",
) -> utils.MetricLogger:
    """
    Evaluate ``model`` over ``data_loader`` and return the populated metric logger.

    :param model: model to evaluate (switched to eval mode for the pass)
    :param criterion: loss function applied to (output, target)
    :param data_loader: loader yielding (image, target) batches
    :param device: device the batches are moved to
    :param print_freq: batches between progress log lines
    :param log_suffix: appended to the "Test:" log header
    :return: utils.MetricLogger with loss/acc1/acc5 synchronized across processes
    """
    model.eval()
    metric_logger = utils.MetricLogger(_LOGGER, delimiter=" ")
    header = f"Test: {log_suffix}"
    num_processed_samples = 0
    with torch.no_grad():
        for image, target in metric_logger.log_every(data_loader, print_freq, header):
            image = image.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image)
            if isinstance(output, tuple):
                # sparseml models may return (logits, probs); keep logits only
                output = output[0]
            loss = criterion(output, target)
            acc1, num_correct_1, acc5, num_correct_5 = utils.accuracy(
                output, target, topk=(1, 5)
            )
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters["acc1"].update(
                acc1.item(), n=batch_size, total=num_correct_1
            )
            metric_logger.meters["acc5"].update(
                acc5.item(), n=batch_size, total=num_correct_5
            )
            num_processed_samples += batch_size
    # gather the stats from all processes
    num_processed_samples = utils.reduce_across_processes(num_processed_samples)
    if (
        hasattr(data_loader.dataset, "__len__")
        and len(data_loader.dataset) != num_processed_samples
        and torch.distributed.get_rank() == 0
    ):
        # See FIXME above
        warnings.warn(
            f"It looks like the dataset has {len(data_loader.dataset)} samples, "
            f"but {num_processed_samples} "
            "samples were used for the validation, which might bias the results. "
            "Try adjusting the batch size and / or the world size. "
            "Setting the world size to 1 is always a safe bet."
        )
    metric_logger.synchronize_between_processes()
    _LOGGER.info(
        header
        + f"Acc@1 {metric_logger.acc1.global_avg:.3f}"
        + f"Acc@5 {metric_logger.acc5.global_avg:.3f}"
    )
    return metric_logger
def train_one_epoch(
    model: torch.nn.Module,
    criterion: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    data_loader: DataLoader,
    data_loader_test: DataLoader,
    device: torch.device,
    epoch: int,
    args,
    log_metrics_fn: Callable[[str, utils.MetricLogger, int, int], None],
    manager=None,
    model_ema=None,
    scaler=None,
) -> utils.MetricLogger:
    """
    Train ``model`` for one epoch with optional gradient accumulation, AMP
    scaling, sparseml manager loss updates, and EMA tracking.

    :param model: model being trained
    :param criterion: loss function
    :param optimizer: optimizer stepped on accumulation boundaries
    :param data_loader: training batches of (image, target)
    :param data_loader_test: loader evaluated every ``args.eval_steps`` steps
    :param device: device batches are moved to
    :param epoch: current epoch index (logging and manager updates)
    :param args: namespace of training options (accum steps, clipping, logging)
    :param log_metrics_fn: callback of (tag, metric_logger, epoch, step)
    :param manager: optional sparseml manager that can modify the loss
    :param model_ema: optional EMA model updated every ``args.model_ema_steps``
    :param scaler: optional torch.cuda.amp.GradScaler for mixed precision
    :return: the epoch's utils.MetricLogger
    """
    accum_steps = args.gradient_accum_steps
    model.train()
    metric_logger = utils.MetricLogger(_LOGGER, delimiter=" ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
    metric_logger.add_meter(
        "imgs_per_sec", utils.SmoothedValue(window_size=10, fmt="{value}")
    )
    metric_logger.add_meter("loss", utils.SmoothedValue(window_size=accum_steps))
    metric_logger.add_meter("acc1", utils.SmoothedValue(window_size=accum_steps))
    metric_logger.add_meter("acc5", utils.SmoothedValue(window_size=accum_steps))
    steps_accumulated = 0
    num_optim_steps = 0
    # initial zero grad for gradient accumulation
    optimizer.zero_grad()
    header = f"Epoch: [{epoch}]"
    for image, target in metric_logger.log_every(
        data_loader, args.logging_steps * accum_steps, header
    ):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        with torch.cuda.amp.autocast(enabled=scaler is not None):
            outputs = output = model(image)
            if isinstance(output, tuple):
                # NOTE: sparseml models return two things (logits & probs)
                output = output[0]
            loss = criterion(output, target)
        # NOTE(review): backward/step run only on accumulation boundaries, so
        # intermediate batches contribute no gradients — confirm this matches
        # the intended gradient-accumulation semantics
        if steps_accumulated % accum_steps == 0:
            if manager is not None:
                loss = manager.loss_update(
                    loss=loss,
                    module=model,
                    optimizer=optimizer,
                    epoch=epoch,
                    steps_per_epoch=len(data_loader) / accum_steps,
                    student_outputs=outputs,
                    student_inputs=image,
                )
            # first: do training to consume gradients
            if scaler is not None:
                scaler.scale(loss).backward()
                if args.clip_grad_norm is not None:
                    # we should unscale the gradients of optimizer's assigned params
                    # if do gradient clipping
                    scaler.unscale_(optimizer)
                    nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                if args.clip_grad_norm is not None:
                    nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
                optimizer.step()
            # zero grad here to start accumulating next set of gradients
            optimizer.zero_grad()
            num_optim_steps += 1
        steps_accumulated += 1
        if model_ema and num_optim_steps % args.model_ema_steps == 0:
            model_ema.update_parameters(model)
            if epoch < args.lr_warmup_epochs:
                # Reset ema buffer to keep copying weights during warmup period
                model_ema.n_averaged.fill_(0)
        acc1, num_correct_1, acc5, num_correct_5 = utils.accuracy(
            output, target, topk=(1, 5)
        )
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters["acc1"].update(
            acc1.item(), n=batch_size, total=num_correct_1
        )
        metric_logger.meters["acc5"].update(
            acc5.item(), n=batch_size, total=num_correct_5
        )
        metric_logger.meters["imgs_per_sec"].update(
            batch_size / (time.time() - start_time)
        )
        if args.eval_steps is not None and num_optim_steps % args.eval_steps == 0:
            eval_metrics = evaluate(model, criterion, data_loader_test, device)
            model.train()
            log_metrics_fn("Test", eval_metrics, epoch, num_optim_steps)
        if num_optim_steps % args.logging_steps == 0:
            log_metrics_fn("Train", metric_logger, epoch, num_optim_steps)
    return metric_logger
21,230 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
_LOGGER = logging.getLogger(__name__)
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join(
"~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt"
)
cache_path = os.path.expanduser(cache_path)
return cache_path
# Optional dependency: fall back to None when torchvision.models cannot be
# imported so callers can detect its absence instead of crashing at import time.
try:
    from torchvision import models as torchvision_models
except Exception:
    torchvision_models = None
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed,
    with repeated augmentation.
    It ensures that different each augmented version of a sample will be visible to a
    different process (GPU).
    Heavily based on 'torch.utils.data.DistributedSampler'.
    This is borrowed from the DeiT Repo:
    https://github.com/facebookresearch/deit/blob/main/samplers.py
    """

    def __init__(
        self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0, repetitions=3
    ):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # per-replica sample count after repeating each sample `repetitions` times
        self.num_samples = int(
            math.ceil(len(self.dataset) * float(repetitions) / self.num_replicas)
        )
        self.total_size = self.num_samples * self.num_replicas
        # number of indices actually yielded per replica (dataset floored to a
        # multiple of 256, then split across replicas)
        self.num_selected_samples = int(
            math.floor(len(self.dataset) // 256 * 256 / self.num_replicas)
        )
        self.shuffle = shuffle
        self.seed = seed
        self.repetitions = repetitions

    def __iter__(self):
        if self.shuffle:
            # Deterministically shuffle based on epoch
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # Add extra samples to make it evenly divisible
        indices = [ele for ele in indices for i in range(self.repetitions)]
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # Subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices[: self.num_selected_samples])

    def __len__(self):
        # reports the per-replica count actually iterated, not the padded total
        return self.num_selected_samples

    def set_epoch(self, epoch):
        # callers advance the epoch to reseed the deterministic shuffle
        self.epoch = epoch
def load_data(traindir, valdir, args):
    """
    Build the train/val ImageFolder datasets (with optional on-disk caching)
    and their samplers.

    :param traindir: directory of training images (ImageFolder layout)
    :param valdir: directory of validation images (ImageFolder layout)
    :param args: namespace of preprocessing / caching / distributed options
    :return: (train_dataset, test_dataset, train_sampler, test_sampler)
    """
    # Data loading code
    _LOGGER.info("Loading data")
    val_resize_size, val_crop_size, train_crop_size = (
        args.val_resize_size,
        args.val_crop_size,
        args.train_crop_size,
    )
    interpolation = InterpolationMode(args.interpolation)
    _LOGGER.info("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        _LOGGER.info(f"Loading dataset_train from {cache_path}")
        dataset, _ = torch.load(cache_path)
    else:
        auto_augment_policy = getattr(args, "auto_augment", None)
        random_erase_prob = getattr(args, "random_erase", 0.0)
        ra_magnitude = args.ra_magnitude
        augmix_severity = args.augmix_severity
        dataset = torchvision.datasets.ImageFolder(
            traindir,
            presets.ClassificationPresetTrain(
                crop_size=train_crop_size,
                mean=args.rgb_mean,
                std=args.rgb_std,
                interpolation=interpolation,
                auto_augment_policy=auto_augment_policy,
                random_erase_prob=random_erase_prob,
                ra_magnitude=ra_magnitude,
                augmix_severity=augmix_severity,
            ),
        )
        if args.cache_dataset:
            _LOGGER.info(f"Saving dataset_train to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset, traindir), cache_path)
    _LOGGER.info(f"Took {time.time() - st}")
    _LOGGER.info("Loading validation data")
    cache_path = _get_cache_path(valdir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        _LOGGER.info(f"Loading dataset_test from {cache_path}")
        dataset_test, _ = torch.load(cache_path)
    else:
        preprocessing = presets.ClassificationPresetEval(
            crop_size=val_crop_size,
            mean=args.rgb_mean,
            std=args.rgb_std,
            resize_size=val_resize_size,
            interpolation=interpolation,
        )
        dataset_test = torchvision.datasets.ImageFolder(
            valdir,
            preprocessing,
        )
        if args.cache_dataset:
            _LOGGER.info(f"Saving dataset_test to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)
    _LOGGER.info("Creating data loaders")
    if args.distributed:
        if hasattr(args, "ra_sampler") and args.ra_sampler:
            # repeated-augmentation sampler for distributed training
            train_sampler = RASampler(dataset, shuffle=True, repetitions=args.ra_reps)
        else:
            train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset_test, shuffle=False
        )
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)
    return dataset, dataset_test, train_sampler, test_sampler
21,231 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
# Optional dependency: fall back to None when torchvision.models cannot be
# imported so callers can detect its absence instead of crashing at import time.
try:
    from torchvision import models as torchvision_models
except Exception:
    torchvision_models = None
class ModelRegistry(object):
def available_keys() -> List[str]:
def create(
key: Optional[str] = None,
pretrained: Union[bool, str] = False,
pretrained_path: str = None,
pretrained_dataset: str = None,
load_strict: bool = True,
ignore_error_tensors: List[str] = None,
**kwargs,
) -> Union[Module, Tuple[Module, Optional[str]]]:
def create_zoo_model(
key: str,
pretrained: Union[bool, str] = True,
pretrained_dataset: str = None,
) -> Model:
def input_shape(key: str) -> Any:
def register(
key: Union[str, List[str]],
input_shape: Any,
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: str,
default_dataset: str,
default_desc: str,
repo_source: str = "sparseml",
def_ignore_error_tensors: List[str] = None,
desc_args: Dict[str, Tuple[str, Any]] = None,
):
def decorator(constructor_func):
def register_wrapped_model_constructor(
wrapped_constructor: Callable,
key: Union[str, List[str]],
input_shape: Any,
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: str,
default_dataset: str,
default_desc: str,
repo_source: str,
def_ignore_error_tensors: List[str] = None,
desc_args: Dict[str, Tuple[str, Any]] = None,
):
def _registered_wrapper(
key: str,
constructor_func: Callable,
):
def wrapper(
pretrained_path: str = None,
pretrained: Union[bool, str] = False,
pretrained_dataset: str = None,
load_strict: bool = True,
ignore_error_tensors: List[str] = None,
*args,
**kwargs,
):
def torch_distributed_zero_first(local_rank: Optional[int]):
def load_model(
path: str,
model: Module,
strict: bool = False,
ignore_error_tensors: List[str] = None,
fix_data_parallel: bool = True,
):
def _create_model(
    arch_key: Optional[str] = None,
    local_rank=None,
    pretrained: Optional[bool] = False,
    checkpoint_path: Optional[str] = None,
    pretrained_dataset: Optional[str] = None,
    num_classes=None,
):
    """
    Create a model from the sparseml ModelRegistry or, as a fallback, from
    torchvision.models, optionally loading a checkpoint.

    :param arch_key: registry/torchvision architecture name; resolved through
        the registry when None or registered there
    :param local_rank: distributed rank used to serialize registry work
    :param pretrained: pretrained-weight selector passed to the constructors
    :param checkpoint_path: optional checkpoint to load (classifier tensors
        listed below are ignored on shape mismatch)
    :param pretrained_dataset: dataset variant of the pretrained weights
    :param num_classes: number of output classes for the final layer
    :return: (model, arch_key) tuple
    :raises ValueError: when ``arch_key`` is found in neither registry
    """
    if not arch_key or arch_key in ModelRegistry.available_keys():
        with torch_distributed_zero_first(local_rank):
            model = ModelRegistry.create(
                key=arch_key,
                pretrained=pretrained,
                pretrained_path=checkpoint_path,
                pretrained_dataset=pretrained_dataset,
                num_classes=num_classes,
            )
        if isinstance(model, tuple):
            model, arch_key = model
    elif arch_key in torchvision.models.__dict__:
        # fall back to torchvision
        # load initial, untrained model with correct number of classes
        model = torchvision.models.__dict__[arch_key](num_classes=num_classes)
        # NOTE(review): the default ``pretrained=False`` also enters this branch
        # (False is not None) and loads a fresh torchvision model with
        # strict=False — confirm this is intended
        if pretrained is not None:
            # in transfer learning cases, final FC layer may not match dimensions
            # load base pretrained model and laod state dict with strict=False
            pretrained_model = torchvision.models.__dict__[arch_key](
                pretrained=pretrained
            )
            if (
                getattr(pretrained_model, "classifier", None)
                and pretrained_model.classifier.out_features != num_classes
            ):
                del pretrained_model.classifier
            model.load_state_dict(pretrained_model.state_dict(), strict=False)
        if checkpoint_path is not None:
            load_model(
                checkpoint_path,
                model,
                strict=True,
                ignore_error_tensors=[
                    "classifier.fc.weight",
                    "classifier.fc.bias",
                    "classifier.1.weight",
                    "classifier.1.bias",
                    "fc.weight",
                    "fc.bias",
                    "classifier.weight",
                    "classifier.bias",
                ],
            )
    else:
        raise ValueError(
            f"Unable to find {arch_key} in ModelRegistry or in torchvision.models"
        )
    return model, arch_key
21,232 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
def _get_lr_scheduler(args, optimizer, checkpoint=None, manager=None):
lr_scheduler = None
if manager is not None and manager.learning_rate_modifiers:
lr_scheduler = None
else:
args.lr_scheduler = args.lr_scheduler.lower()
if args.lr_scheduler == "steplr":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma
)
elif args.lr_scheduler == "cosineannealinglr":
main_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=args.epochs - args.lr_warmup_epochs,
eta_min=args.lr_min,
)
elif args.lr_scheduler == "exponentiallr":
main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma=args.lr_gamma
)
else:
raise RuntimeError(
f"Invalid lr scheduler '{args.lr_scheduler}'. "
"Only StepLR, CosineAnnealingLR and ExponentialLR "
"are supported."
)
if args.lr_warmup_epochs > 0:
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer,
start_factor=args.lr_warmup_decay,
total_iters=args.lr_warmup_epochs,
)
elif args.lr_warmup_method == "constant":
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer,
factor=args.lr_warmup_decay,
total_iters=args.lr_warmup_epochs,
)
else:
raise RuntimeError(
f"Invalid warmup lr method '{args.lr_warmup_method}'. "
"Only linear and constant are supported."
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer,
schedulers=[warmup_lr_scheduler, main_lr_scheduler],
milestones=[args.lr_warmup_epochs],
)
else:
lr_scheduler = main_lr_scheduler
if args.resume and checkpoint:
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
return lr_scheduler | null |
21,233 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
def download_framework_model_by_recipe_type(
zoo_model: Model, recipe_name: Optional[str] = None, model_suffix: str = "pth"
) -> str:
def _load_checkpoint(path):
if path.startswith("zoo:"):
path = download_framework_model_by_recipe_type(Model(path))
return torch.load(path, map_location="cpu") | null |
21,234 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
def _save_checkpoints(
    epoch, output_dir, file_names, checkpoint, train_metrics, eval_metrics
):
    """Write the checkpoint under each name plus a companion .txt metrics file."""
    summary = [
        f"epoch: {epoch}",
        f"__loss__: {train_metrics.loss.global_avg}",
        f"top1acc: {eval_metrics.acc1.global_avg}",
        f"top5acc: {eval_metrics.acc5.global_avg}",
    ]
    metrics = "\n".join(summary)
    for fname in file_names:
        utils.save_on_master(checkpoint, os.path.join(output_dir, fname))
        # Only the master process writes the human-readable metrics summary.
        if utils.is_main_process():
            txt_path = os.path.join(output_dir, fname.replace(".pth", ".txt"))
            with open(txt_path, "w") as fp:
                fp.write(metrics)
21,235 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
# Error template raised when deprecated CLI flags from the old
# image-classification training script are passed to this entrypoint.
_ARGUMENTS_ERROR = (
    "Deprecated arguments found: {}. "
    "Please see --help for new arguments.\n"
    "The old script can be accessed with "
    "`sparseml.pytorch.image_classification.train`"
)
def _deprecate_old_arguments(f):
def new_func(*args, **kwargs):
if "--recipe-path" in sys.argv:
raise ValueError(_ARGUMENTS_ERROR.format("--recipe-path"))
return f(*args, **kwargs)
return update_wrapper(new_func, f) | null |
21,236 | import datetime
import logging
import math
import os
import sys
import time
import warnings
from functools import update_wrapper
from types import SimpleNamespace
from typing import Callable, Optional
import torch
import torch.utils.data
import torchvision
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader, default_collate
from torchvision.transforms.functional import InterpolationMode
import click
from sparseml.optim.helpers import load_recipe_yaml_str
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets, transforms, utils
from sparseml.pytorch.torchvision.sampler import RASampler
from sparseml.pytorch.utils.helpers import (
default_device,
download_framework_model_by_recipe_type,
torch_distributed_zero_first,
)
from sparseml.pytorch.utils.logger import (
LoggerManager,
PythonLogger,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.pytorch.utils.model import load_model, model_to_device
from sparsezoo import Model
def main(args):
    """Entrypoint for SparseML image-classification training.

    Wires up, in order: distributed setup, data loading (with optional
    MixUp/CutMix), model and distillation-teacher creation, loss/optimizer
    construction, SparseML recipe managers (fresh run, --checkpoint-path
    transfer learning, or --resume), logging, and the epoch loop with
    checkpointing. ``args`` is the namespace built by the click CLI wrapper.
    """
    # --resume (continue same run) and --checkpoint-path (start new run from
    # weights) are mutually exclusive.
    if args.resume is not None and args.checkpoint_path is not None:
        raise ValueError(
            "Only one of --resume or --checkpoint-path can be specified, not both."
        )
    if args.output_dir:
        utils.mkdir(args.output_dir)
    utils.init_distributed_mode(args)
    # Mute this module's logger on non-master ranks.
    if not utils.is_main_process():
        _LOGGER.disabled = True
    _LOGGER.info(args)
    if not args.device:
        args.device = default_device()
    device = args.device
    if args.use_deterministic_algorithms:
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True
    # Dataset layout is assumed to be <dataset_path>/{train,val}.
    train_dir = os.path.join(args.dataset_path, "train")
    val_dir = os.path.join(args.dataset_path, "val")
    dataset, dataset_test, train_sampler, test_sampler = load_data(
        train_dir, val_dir, args
    )
    collate_fn = None
    num_classes = len(dataset.classes)
    # MixUp/CutMix are applied batch-wise via a custom collate function.
    mixup_transforms = []
    if args.mixup_alpha > 0.0:
        mixup_transforms.append(
            transforms.RandomMixup(num_classes, p=1.0, alpha=args.mixup_alpha)
        )
    if args.cutmix_alpha > 0.0:
        mixup_transforms.append(
            transforms.RandomCutmix(num_classes, p=1.0, alpha=args.cutmix_alpha)
        )
    if mixup_transforms:
        mixupcutmix = torchvision.transforms.RandomChoice(mixup_transforms)
        def collate_fn(batch):
            return mixupcutmix(*default_collate(batch))
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        sampler=test_sampler,
        num_workers=args.workers,
        pin_memory=True,
    )
    _LOGGER.info("Creating model")
    local_rank = int(os.environ["LOCAL_RANK"]) if args.distributed else None
    model, arch_key = _create_model(
        arch_key=args.arch_key,
        local_rank=local_rank,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint_path,
        pretrained_dataset=args.pretrained_dataset,
        num_classes=num_classes,
    )
    # "self" / "disable" / None are sentinel values handled by the recipe,
    # anything else is a teacher model checkpoint or zoo stub.
    if args.distill_teacher not in ["self", "disable", None]:
        _LOGGER.info("Instantiating teacher")
        distill_teacher, _ = _create_model(
            arch_key=args.teacher_arch_key,
            local_rank=local_rank,
            pretrained=True,  # teacher is always pretrained
            pretrained_dataset=args.pretrained_teacher_dataset,
            checkpoint_path=args.distill_teacher,
            num_classes=num_classes,
        )
    else:
        distill_teacher = args.distill_teacher
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # label_smoothing requires the torch>=1.10 CrossEntropyLoss signature.
    if version.parse(torch.__version__) >= version.parse("1.10"):
        criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)
    elif args.label_smoothing > 0:
        raise ValueError(
            f"`label_smoothing` not supported for {torch.__version__}, "
            f"try upgrading to at-least 1.10"
        )
    else:
        criterion = nn.CrossEntropyLoss()
    # Optional per-parameter weight-decay overrides (biases / transformer
    # embedding tables) fed into utils.set_weight_decay below.
    custom_keys_weight_decay = []
    if args.bias_weight_decay is not None:
        custom_keys_weight_decay.append(("bias", args.bias_weight_decay))
    if args.transformer_embedding_decay is not None:
        for key in [
            "class_token",
            "position_embedding",
            "relative_position_bias_table",
        ]:
            custom_keys_weight_decay.append((key, args.transformer_embedding_decay))
    parameters = utils.set_weight_decay(
        model,
        args.weight_decay,
        norm_weight_decay=args.norm_weight_decay,
        custom_keys_weight_decay=custom_keys_weight_decay
        if len(custom_keys_weight_decay) > 0
        else None,
    )
    # Optimizer selection (SGD variants / RMSprop / AdamW / Adam).
    opt_name = args.opt.lower()
    if opt_name.startswith("sgd"):
        optimizer = torch.optim.SGD(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov="nesterov" in opt_name,
        )
    elif opt_name == "rmsprop":
        optimizer = torch.optim.RMSprop(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            eps=0.0316,
            alpha=0.9,
        )
    elif opt_name == "adamw":
        optimizer = torch.optim.AdamW(
            parameters, lr=args.lr, weight_decay=args.weight_decay
        )
    elif opt_name == "adam":
        optimizer = torch.optim.Adam(
            parameters, lr=args.lr, weight_decay=args.weight_decay
        )
    else:
        raise RuntimeError(
            f"Invalid optimizer {args.opt}. Only SGD, RMSprop and AdamW are supported."
        )
    scaler = torch.cuda.amp.GradScaler() if args.amp else None
    model_ema = None
    if args.model_ema:
        # Decay adjustment that aims to keep the decay independent from
        # other hyper-parameters originally proposed at:
        # https://github.com/facebookresearch/pycls/blob/f8cd9627/pycls/core/net.py#L123
        #
        # total_ema_updates =
        # (Dataset_size / n_GPUs) * epochs / (batch_size_per_gpu * EMA_steps)
        # We consider constant = Dataset_size for a given dataset/setup and ommit it.
        # Thus:
        # adjust = 1 / total_ema_updates ~=
        # n_GPUs * batch_size_per_gpu * EMA_steps / epochs
        adjust = args.world_size * args.batch_size * args.model_ema_steps / args.epochs
        alpha = 1.0 - args.model_ema_decay
        alpha = min(1.0, alpha * adjust)
        model_ema = utils.ExponentialMovingAverage(
            model, device=model.device, decay=1.0 - alpha
        )
    # Recipe manager setup: `manager` drives this run's sparsification;
    # `checkpoint_manager` preserves a prior run's recipe for staged composition.
    manager = checkpoint_manager = None
    if args.checkpoint_path:
        checkpoint = _load_checkpoint(args.checkpoint_path)
        # restore state from prior recipe
        manager = (
            ScheduledModifierManager.from_yaml(
                args.recipe, recipe_variables=args.recipe_args
            )
            if args.recipe is not None
            else None
        )
        checkpoint_manager = (
            ScheduledModifierManager.from_yaml(checkpoint["recipe"])
            if "recipe" in checkpoint and checkpoint["recipe"] is not None
            else None
        )
    elif args.resume:
        checkpoint = _load_checkpoint(args.resume)
        if "recipe" in checkpoint:
            # NOTE: override manager with the checkpoint's manager
            manager = ScheduledModifierManager.from_yaml(checkpoint["recipe"])
            checkpoint_manager = None
        else:
            raise ValueError("Flag --resume is set but checkpoint does not have recipe")
        # NOTE: override start epoch
        args.start_epoch = checkpoint["epoch"] + 1
    else:
        checkpoint = None
        manager = (
            ScheduledModifierManager.from_yaml(
                args.recipe, recipe_variables=args.recipe_args
            )
            if args.recipe is not None
            else None
        )
        checkpoint_manager = None
    # load params
    if checkpoint is not None:
        if "optimizer" in checkpoint and not args.test_only:
            if args.resume:
                optimizer.load_state_dict(checkpoint["optimizer"])
            else:
                warnings.warn(
                    "Optimizer state dict not loaded from checkpoint. Unless run is "
                    "resumed with the --resume arg, the optimizer will start from a "
                    "fresh state"
                )
        if model_ema and "model_ema" in checkpoint:
            model_ema.load_state_dict(checkpoint["model_ema"])
        if scaler and "scaler" in checkpoint:
            scaler.load_state_dict(checkpoint["scaler"])
    if args.test_only:
        # We disable the cudnn benchmarking because it can
        # noticeably affect the accuracy
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        if model_ema:
            evaluate(
                model_ema,
                criterion,
                data_loader_test,
                device,
                print_freq=args.logging_steps,
                log_suffix="EMA",
            )
        else:
            evaluate(
                model,
                criterion,
                data_loader_test,
                device,
                print_freq=args.logging_steps,
            )
        return
    # Loggers (tensorboard / wandb are best-effort) only on the master rank.
    if utils.is_main_process():
        loggers = [
            PythonLogger(),
        ]
        try:
            loggers.append(TensorBoardLogger(log_path=args.output_dir))
        except (ModuleNotFoundError, ImportError):
            warnings.warn("Unable to import tensorboard for logging")
        try:
            config = vars(args)
            if manager is not None:
                config["manager"] = str(manager)
            loggers.append(WANDBLogger(init_kwargs=dict(config=config)))
        except ImportError:
            warnings.warn("Unable to import wandb for logging")
        logger = LoggerManager(loggers)
    else:
        logger = LoggerManager(log_python=False)
    # Persist both the as-given and the fully-resolved recipe alongside outputs.
    if args.recipe is not None:
        base_path = os.path.join(args.output_dir, "original_recipe.yaml")
        with open(base_path, "w") as fp:
            fp.write(load_recipe_yaml_str(args.recipe))
        logger.save(base_path)
        full_path = os.path.join(args.output_dir, "final_recipe.yaml")
        manager.save(full_path)
        logger.save(full_path)
    steps_per_epoch = len(data_loader) / args.gradient_accum_steps
    def log_metrics(tag: str, metrics: utils.MetricLogger, epoch: int, epoch_step: int):
        step = int(epoch * steps_per_epoch + epoch_step)
        for metric_name, smoothed_value in metrics.meters.items():
            logger.log_scalar(
                f"{tag}/{metric_name}", smoothed_value.global_avg, step=step
            )
    recipe_kwargs = {}
    # TODO: What is the right logic to check if we need a grad sampler here? It seems
    # that for YOLO the grad sampler is always created, whether or not it's needed. See
    # https://github.com/neuralmagic/sparseml/blob/b73a173ff89c3bb524dcc0a1f7a16a3109a234a1/src/sparseml/yolov8/trainers.py#L699
    grad_sampler_loader = create_grad_sampler_loader(dataset)
    def data_loader_builder(**kwargs):
        # Infinite generator over the grad-sampler loader, yielding
        # (forward_args, forward_kwargs, labels) as the recipe expects.
        while True:
            for input, target in grad_sampler_loader:
                input, target = input.to(device).float(), target.to(device)
                yield [input], {}, target
    recipe_kwargs["grad_sampler"] = {
        "data_loader_builder": data_loader_builder,
        "loss_function": criterion,
    }
    if manager is not None:
        manager.initialize(
            model,
            epoch=args.start_epoch,
            loggers=logger,
            distillation_teacher=distill_teacher,
            **recipe_kwargs,
        )
        # manager.modify wraps either the optimizer or the AMP scaler so the
        # recipe can hook every optimizer step.
        step_wrapper = manager.modify(
            model,
            optimizer,
            steps_per_epoch=steps_per_epoch,
            epoch=args.start_epoch,
            wrap_optim=scaler,
            **recipe_kwargs,
        )
        if scaler is None:
            optimizer = step_wrapper
        else:
            scaler = step_wrapper
    lr_scheduler = _get_lr_scheduler(
        args, optimizer, checkpoint=checkpoint, manager=manager
    )
    if args.distributed:
        ddp = True
        device = local_rank
    else:
        ddp = False
    model, device, _ = model_to_device(model, device, ddp)
    if distill_teacher is not None:
        distill_teacher, _, _ = model_to_device(distill_teacher, device, ddp)
    model_without_ddp = model
    if args.distributed:
        model_without_ddp = model.module
    best_top1_acc = -math.inf
    _LOGGER.info("Start training")
    start_time = time.time()
    # Main epoch loop: train, evaluate, checkpoint (recipe length wins when set).
    max_epochs = manager.max_epochs if manager is not None else args.epochs
    for epoch in range(args.start_epoch, max_epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # During QAT, AMP scaling and EMA are disabled.
        if manager is not None and manager.qat_active(epoch=epoch):
            if scaler is not None:
                scaler._enabled = False
            model_ema = None
        train_metrics = train_one_epoch(
            model,
            criterion,
            optimizer,
            data_loader,
            data_loader_test,
            device,
            epoch,
            args,
            log_metrics,
            manager=manager,
            model_ema=model_ema,
            scaler=scaler,
        )
        log_metrics("Train", train_metrics, epoch, steps_per_epoch)
        if lr_scheduler:
            lr_scheduler.step()
        eval_metrics = evaluate(model, criterion, data_loader_test, device)
        log_metrics("Test", eval_metrics, epoch, steps_per_epoch)
        top1_acc = eval_metrics.acc1.global_avg
        if model_ema:
            ema_eval_metrics = evaluate(
                model_ema,
                criterion,
                data_loader_test,
                device,
                log_suffix="EMA",
            )
            log_metrics("Test/EMA", ema_eval_metrics, epoch, steps_per_epoch)
        is_new_best = epoch >= args.save_best_after and top1_acc > best_top1_acc
        if is_new_best:
            best_top1_acc = top1_acc
        if args.output_dir:
            checkpoint = {
                "state_dict": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "args": args,
                "arch_key": arch_key,
            }
            if lr_scheduler:
                checkpoint["lr_scheduler"] = lr_scheduler.state_dict()
            if model_ema:
                checkpoint["model_ema"] = model_ema.state_dict()
            if scaler:
                checkpoint["scaler"] = scaler.state_dict()
            # epoch == -1 marks a finished run; staged recipes offset the epoch
            # by the prior run's length so resuming composes correctly.
            if checkpoint_manager is not None:
                checkpoint["epoch"] = (
                    -1
                    if epoch == max_epochs - 1
                    else epoch + checkpoint_manager.max_epochs
                )
                checkpoint["recipe"] = str(
                    ScheduledModifierManager.compose_staged(checkpoint_manager, manager)
                )
            else:
                checkpoint["epoch"] = -1 if epoch == max_epochs - 1 else epoch
                if manager is not None:
                    checkpoint["recipe"] = str(manager)
            file_names = ["checkpoint.pth"]
            if is_new_best:
                file_names.append("checkpoint-best.pth")
            _save_checkpoints(
                epoch,
                args.output_dir,
                file_names,
                checkpoint,
                train_metrics,
                eval_metrics,
            )
    if manager is not None:
        manager.finalize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    _LOGGER.info(f"Training time {total_time_str}")
# Error template raised when deprecated CLI flags from the old
# image-classification training script are passed to this entrypoint.
_ARGUMENTS_ERROR = (
    "Deprecated arguments found: {}. "
    "Please see --help for new arguments.\n"
    "The old script can be accessed with "
    "`sparseml.pytorch.image_classification.train`"
)
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli(ctx, **kwargs)` to solve the following problem:
PyTorch classification training
Here is the function:
def cli(ctx, **kwargs):
    """
    PyTorch classification training
    """
    # Leftover positional args are deprecated flags from the old script.
    if ctx.args:
        raise ValueError(_ARGUMENTS_ERROR.format(ctx.args))
    main(SimpleNamespace(**kwargs))
21,237 | import json
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import torchvision
from torch.nn import Module
from torch.utils.data import DataLoader
from torchvision.transforms.functional import InterpolationMode
from tqdm import tqdm
import click
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.torchvision import presets
from sparseml.pytorch.utils import ModuleExporter
from sparseml.pytorch.utils.model import load_model
_LOGGER = logging.getLogger(__name__)
def export(
    model: Module,
    val_loader: DataLoader,
    save_dir: str,
    num_samples: int,
    onnx_opset: int,
    convert_qat: bool,
    labels_to_class_mapping: Optional[Union[str, Dict[int, str]]],
) -> None:
    """Export ``model`` to ONNX plus an optional set of sample batches.

    :param model: module to export
    :param val_loader: loader whose batches provide the sample/tracing inputs
    :param save_dir: directory the exporter writes into
    :param num_samples: number of sample batches to export; values <= 0
        disable sample export (one batch is still drawn to trace the graph)
    :param onnx_opset: ONNX opset version to export with
    :param convert_qat: whether to convert a QAT graph during export
    :param labels_to_class_mapping: optional label-id -> class-name mapping
        written into the deployment folder
    """
    exporter = ModuleExporter(model, save_dir)
    export_samples = num_samples > 0
    # Even with sample export disabled we must iterate once so `x` below is
    # bound; normalize negative counts to a single batch.
    if num_samples < 0:
        num_samples = 1
    for batch, (x, label) in tqdm(
        enumerate(val_loader), desc="Exporting samples", total=num_samples
    ):
        if batch >= num_samples:
            break
        if export_samples:
            exporter.export_samples(
                sample_batches=[x], sample_labels=[label], exp_counter=batch
            )
    # NOTE(review): intentionally relies on `x` leaking out of the loop — the
    # last drawn batch is the ONNX tracing input; val_loader must be non-empty
    # or this raises NameError. Confirm that is acceptable for all callers.
    _LOGGER.info(f"exporting onnx in {save_dir}")
    exporter.export_onnx(x, opset=onnx_opset, convert_qat=convert_qat)
    exporter.create_deployment_folder(labels_to_class_mapping=labels_to_class_mapping)
21,238 | import copy
import datetime
import errno
import hashlib
import logging
import os
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
def is_dist_avail_and_initialized():
    """True only when torch.distributed is compiled in AND a process group exists."""
    # is_initialized() may only be called when the backend is available,
    # so short-circuit on availability first.
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes, or 1 when running single-process."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
21,239 | import copy
import datetime
import errno
import hashlib
import logging
import os
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
def init_distributed_mode(args):
    """Detect the launcher environment and initialize torch.distributed.

    Populates ``args.rank`` / ``args.world_size`` / ``args.gpu`` /
    ``args.distributed`` in place, then creates an NCCL process group and
    mutes ``print`` on non-master ranks. Falls back to single-process mode
    when no launcher environment variables are present.
    """
    # torchrun / torch.distributed.launch export RANK, WORLD_SIZE, LOCAL_RANK.
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    # SLURM exposes the global process id; derive the local GPU index from it.
    # NOTE(review): this branch leaves args.world_size unset — confirm the
    # caller provides it before init_process_group is reached.
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Rank was pre-set by the caller; keep it as-is.
        pass
    else:
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend,
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    # Synchronize all ranks before muting print() on non-master processes.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
21,240 | import copy
import datetime
import errno
import hashlib
import logging
import os
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `average_checkpoints` function. Write a Python function `def average_checkpoints(inputs)` to solve the following problem:
Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from: https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16 Args: inputs (List[str]): An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
Here is the function:
def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights.

    Original implementation taken from:
    https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16

    :param inputs: iterable of string paths of checkpoints to load from;
        all checkpoints must share an identical set of 'model' parameter names
    :return: a dict with the first checkpoint's layout whose 'model' key maps
        parameter names to the element-wise average across all checkpoints
        (float averages for floating tensors, floor division for int tensors)
    :raises KeyError: if a checkpoint's parameter names differ from the first's
    """
    params_dict = OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for fpath in inputs:
        with open(fpath, "rb") as f:
            # Force every tensor onto CPU regardless of where it was saved.
            state = torch.load(
                f,
                map_location=(
                    lambda s, _: torch.serialization.default_restore_location(s, "cpu")
                ),
            )
        # Copies over the settings (non-'model' keys) from the first checkpoint
        if new_state is None:
            new_state = state
        model_params = state["model"]
        model_params_keys = list(model_params.keys())
        if params_keys is None:
            params_keys = model_params_keys
        elif params_keys != model_params_keys:
            # Bug fix: previously interpolated the closed file object `f` into
            # the message instead of the checkpoint path.
            raise KeyError(
                f"For checkpoint {fpath}, expected list of params: "
                f"{params_keys}, but found: {model_params_keys}"
            )
        for k in params_keys:
            p = model_params[k]
            if isinstance(p, torch.HalfTensor):
                # Accumulate fp16 params in fp32 to limit precision loss.
                p = p.float()
            if k not in params_dict:
                params_dict[k] = p.clone()
                # NOTE: clone() is needed in case of p is a shared parameter
            else:
                params_dict[k] += p
    averaged_params = OrderedDict()
    for k, v in params_dict.items():
        averaged_params[k] = v
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            # Integer tensors (e.g. counters) get floor division.
            averaged_params[k] //= num_models
    new_state["model"] = averaged_params
    return new_state
21,241 | import copy
import datetime
import errno
import hashlib
import logging
import os
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `store_model_weights` function. Write a Python function `def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True)` to solve the following problem:
This method can be used to prepare weights files for new models. It receives as input a model architecture and a checkpoint from the training script and produces a file with the weights ready for release. Examples: from torchvision import models as M # Classification model = M.mobilenet_v3_large(weights=None) print(store_model_weights(model, './class.pth')) # Quantized Classification model = M.quantization.mobilenet_v3_large(weights=None, quantize=False) model.fuse_model(is_qat=True) model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack') _ = torch.ao.quantization.prepare_qat(model, inplace=True) print(store_model_weights(model, './qat.pth')) # Object Detection model = M.detection.fasterrcnn_mobilenet_v3_large_fpn( weights=None, weights_backbone=None) print(store_model_weights(model, './obj.pth')) # Segmentation model = M.segmentation.deeplabv3_mobilenet_v3_large( weights=None, weights_backbone=None, aux_loss=True) print(store_model_weights(model, './segm.pth', strict=False)) Args: model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes. checkpoint_path (str): The path of the checkpoint we will load. checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored. Default: "model". strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: output_path (str): The location where the weights are saved.
Here is the function:
def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True):
    """
    This method can be used to prepare weights files for new models. It receives as
    input a model architecture and a checkpoint from the training script and produces
    a file with the weights ready for release.
    Examples:
        from torchvision import models as M
        # Classification
        model = M.mobilenet_v3_large(weights=None)
        print(store_model_weights(model, './class.pth'))
        # Quantized Classification
        model = M.quantization.mobilenet_v3_large(weights=None, quantize=False)
        model.fuse_model(is_qat=True)
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack')
        _ = torch.ao.quantization.prepare_qat(model, inplace=True)
        print(store_model_weights(model, './qat.pth'))
        # Object Detection
        model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(
            weights=None, weights_backbone=None)
        print(store_model_weights(model, './obj.pth'))
        # Segmentation
        model = M.segmentation.deeplabv3_mobilenet_v3_large(
            weights=None, weights_backbone=None, aux_loss=True)
        print(store_model_weights(model, './segm.pth', strict=False))
    Args:
        model (pytorch.nn.Module): The model on which the weights
            will be loaded for validation purposes.
        checkpoint_path (str): The path of the checkpoint we will load.
        checkpoint_key (str, optional): The key of the checkpoint where
            the model weights are stored.
            Default: "model".
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
    Returns:
        output_path (str): The location where the weights are saved.
    """
    # Store the new model next to the checkpoint_path
    checkpoint_path = os.path.abspath(checkpoint_path)
    output_dir = os.path.dirname(checkpoint_path)
    # Deep copy to avoid side-effects on the model object.
    model = copy.deepcopy(model)
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Load the weights to the model to validate that everything works
    # and remove unnecessary weights (such as auxiliaries, etc)
    if checkpoint_key == "model_ema":
        # EMA checkpoints carry bookkeeping and a "module." prefix not
        # present in release weights; strip both before loading.
        del checkpoint[checkpoint_key]["n_averaged"]
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            checkpoint[checkpoint_key], "module."
        )
    model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
    # Save to a temporary name first, then rename to a content-hash filename.
    tmp_path = os.path.join(output_dir, str(model.__hash__()))
    torch.save(model.state_dict(), tmp_path)
    sha256_hash = hashlib.sha256()
    with open(tmp_path, "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_hash.update(byte_block)
    hh = sha256_hash.hexdigest()
    # Final name embeds the first 8 hex digits of the content hash.
    output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
    os.replace(tmp_path, output_path)
    return output_path
21,242 | import copy
import datetime
import errno
import hashlib
import logging
import os
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
def set_weight_decay(
    model: torch.nn.Module,
    weight_decay: float,
    norm_weight_decay: Optional[float] = None,
    norm_classes: Optional[List[type]] = None,
    custom_keys_weight_decay: Optional[List[Tuple[str, float]]] = None,
):
    """Split trainable parameters into optimizer param groups by weight decay.

    Parameters whose (optionally dotted) name matches an entry in
    ``custom_keys_weight_decay`` use that entry's decay; parameters owned by a
    normalization layer use ``norm_weight_decay`` when it is given; everything
    else uses ``weight_decay``. Frozen parameters are skipped and empty groups
    are omitted from the returned list of param-group dicts.
    """
    if not norm_classes:
        norm_classes = [
            torch.nn.modules.batchnorm._BatchNorm,
            torch.nn.LayerNorm,
            torch.nn.GroupNorm,
            torch.nn.modules.instancenorm._InstanceNorm,
            torch.nn.LocalResponseNorm,
        ]
    norm_classes = tuple(norm_classes)

    params = {"other": [], "norm": []}
    params_weight_decay = {"other": weight_decay, "norm": norm_weight_decay}
    custom_keys = []
    if custom_keys_weight_decay is not None:
        for custom_key, custom_wd in custom_keys_weight_decay:
            params[custom_key] = []
            params_weight_decay[custom_key] = custom_wd
            custom_keys.append(custom_key)

    def _collect(module, prefix=""):
        # Only this module's own parameters; children are handled recursively
        # below so each parameter is attributed to its owning module.
        for name, p in module.named_parameters(recurse=False):
            if not p.requires_grad:
                continue
            claimed = False
            for key in custom_keys:
                # Dotted custom keys match against the fully-qualified name.
                target_name = (
                    f"{prefix}.{name}" if prefix != "" and "." in key else name
                )
                if key == target_name:
                    params[key].append(p)
                    claimed = True
                    break
            if not claimed:
                if norm_weight_decay is not None and isinstance(module, norm_classes):
                    params["norm"].append(p)
                else:
                    params["other"].append(p)
        for child_name, child_module in module.named_children():
            child_prefix = f"{prefix}.{child_name}" if prefix != "" else child_name
            _collect(child_module, prefix=child_prefix)

    _collect(model)
    return [
        {"params": group, "weight_decay": params_weight_decay[key]}
        for key, group in params.items()
        if len(group) > 0
    ]
21,243 | import torch
from packaging import version
# Import-time guard: validate the installed torch version and wrap
# torch.compile with a compatibility warning. Wrapped in try/except so
# environments without torch can still import this module.
# NOTE(review): `warnings` and `os` are used below but not among the visible
# imports of this chunk — confirm they are imported earlier in the file.
try:
    import torch
    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Monkeypatch torch.compile to warn before delegating to the original.
        torch_compile_func = torch.compile
        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)
        torch.compile = raise_torch_compile_warning
    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    pass
def _default_opset() -> int:
torch_version = version.parse(torch.__version__)
if torch_version < version.parse("1.3"):
return 9
if torch_version < version.parse("1.10.0"):
return 11
return 14 | null |
21,244 | import functools
import logging
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, List, Union
import numpy as np
import torch
from sparseml.pytorch.utils import default_device
from sparseml.utils.datasets import IMAGENET_RGB_MEANS, IMAGENET_RGB_STDS
LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `timeit` function. Write a Python function `def timeit(func)` to solve the following problem:
Decorator to time a function
Here is the function:
def timeit(func):
    """Decorator that logs the wall-clock duration of each call at DEBUG level."""
    @functools.wraps(func)
    def _timed(*args, **kwargs):
        """Measure the wrapped call and forward its return value."""
        started_at = time.perf_counter()
        result = func(*args, **kwargs)
        time_elapsed = time.perf_counter() - started_at
        LOGGER.debug(f"Function: {func.__name__}, Time: {time_elapsed}")
        return result
    return _timed
21,245 | import os
import torch
from sparseml.pytorch.datasets.detection.helpers import (
AnnotatedImageTransforms,
bounding_box_and_labels_to_yolo_fmt,
random_horizontal_flip_image_and_annotations,
ssd_random_crop_image_and_annotations,
)
from sparseml.pytorch.datasets.registry import DatasetRegistry
from sparseml.pytorch.utils import DefaultBoxes, get_default_boxes_300
from sparseml.utils.datasets import (
IMAGENET_RGB_MEANS,
IMAGENET_RGB_STDS,
default_dataset_path,
)
# VOC class-name -> 1-based class id mapping used to turn parsed XML
# annotations into integer labels (0 is reserved for background).
_VOC_CLASS_NAME_TO_ID = {
    "aeroplane": 1,
    "bicycle": 2,
    "bird": 3,
    "boat": 4,
    "bottle": 5,
    "bus": 6,
    "car": 7,
    "cat": 8,
    "chair": 9,
    "cow": 10,
    "diningtable": 11,
    "dog": 12,
    "horse": 13,
    "motorbike": 14,
    "person": 15,
    "pottedplant": 16,
    "sheep": 17,
    "sofa": 18,
    "train": 19,
    "tvmonitor": 20,
}


def _extract_bounding_box_and_labels(image, annotations):
    """
    Convert a parsed VOC annotation dict into model-ready targets.

    :param image: image the boxes belong to (only ``.width`` / ``.height``
        are read, for coordinate scaling)
    :param annotations: VOC annotation dict as produced by the VOC XML parser
    :return: tuple of (boxes, labels) where boxes are in ltrb format scaled
        to [0, 1] and labels are VOC class ids
    """
    boxes = []
    labels = []
    box_objects = annotations["annotation"]["object"]
    if isinstance(box_objects, dict):
        # a single annotated object parses to a dict rather than a list
        box_objects = [box_objects]
    for annotation in box_objects:
        boxes.append(
            [
                float(annotation["bndbox"]["xmin"]),
                float(annotation["bndbox"]["ymin"]),
                float(annotation["bndbox"]["xmax"]),
                float(annotation["bndbox"]["ymax"]),
            ]
        )
        labels.append(_VOC_CLASS_NAME_TO_ID[annotation["name"]])
    boxes = torch.Tensor(boxes).float()
    labels = torch.Tensor(labels).long()

    if boxes.numel() == 0:
        # BUG FIX: an empty object list used to crash on the 2-d indexing
        # below (boxes is a 1-d empty tensor); return empty targets instead,
        # matching the COCO variant of this helper
        return boxes, labels

    # scale boxes to [0, 1]
    boxes[:, [0, 2]] /= image.width  # scale width dimensions
    boxes[:, [1, 3]] /= image.height  # scale height dimensions
    return boxes, labels
21,246 | import os
import torch
import urllib.request as request
import zipfile
from sparseml.pytorch.datasets.detection.helpers import (
AnnotatedImageTransforms,
bounding_box_and_labels_to_yolo_fmt,
random_horizontal_flip_image_and_annotations,
ssd_random_crop_image_and_annotations,
)
from sparseml.pytorch.datasets.registry import DatasetRegistry
from sparseml.pytorch.utils import DefaultBoxes, get_default_boxes_300
from sparseml.utils.datasets import (
IMAGENET_RGB_MEANS,
IMAGENET_RGB_STDS,
default_dataset_path,
)
class CocoDetectionDataset(CocoDetection):
    """
    Wrapper for the Coco Detection dataset to apply standard transforms
    for input to detection models. Will return the processed image along
    with a tuple of its bounding boxes in ltrb format and labels for each box.
    If a DefaultBoxes object is provided, then will encode the box and labels
    using that object returning a tensor of offsets to the default boxes and
    labels for those boxes and return a three item tuple of the encoded boxes,
    labels, and their original values.

    :param root: The root folder to find the dataset at, if not found will
        download here if download=True
    :param train: True if this is for the training distribution,
        False for the validation
    :param rand_trans: True to apply RandomCrop and RandomHorizontalFlip to the
        data, False otherwise
    :param download: True to download the dataset, False otherwise.
    :param year: The dataset year, supports years 2014, 2015, and 2017.
    :param image_size: the size of the image to output from the dataset
    :param preprocessing_type: Type of standard pre-processing to perform.
        Options are 'yolo', 'ssd', or None. None defaults to just image
        normalization with no extra processing of bounding boxes.
    :param default_boxes: DefaultBoxes object used to encode bounding boxes and
        label for model loss computation for SSD models. Only used when
        preprocessing_type='ssd'. Default object represents the default boxes
        used in standard SSD 300 implementation.
    """

    def __init__(
        self,
        root: str = default_dataset_path("coco-detection"),
        train: bool = False,
        rand_trans: bool = False,
        download: bool = True,
        year: str = "2017",
        image_size: int = 300,
        preprocessing_type: str = None,
        default_boxes: DefaultBoxes = None,
    ):
        # torchvision / pycocotools are optional dependencies; fail fast if absent
        if torchvision_import_error is not None:
            raise torchvision_import_error
        if pycocotools is None:
            raise ValueError("pycocotools is not installed, please install to use")
        if preprocessing_type not in [None, "yolo", "ssd"]:
            raise ValueError(
                "preprocessing type {} not supported, valid values are: {}".format(
                    preprocessing_type, [None, "yolo", "ssd"]
                )
            )
        # dataset files live under <root>/<year>/
        root = os.path.join(os.path.abspath(os.path.expanduser(root)), str(year))
        if train:
            data_path = "{root}/train{year}".format(root=root, year=year)
            annotation_path = "{root}/annotations/instances_train{year}.json".format(
                root=root, year=year
            )
        else:
            data_path = "{root}/val{year}".format(root=root, year=year)
            annotation_path = "{root}/annotations/instances_val{year}.json".format(
                root=root, year=year
            )
        if not os.path.isdir(data_path) and download:
            # fetch and unpack both the image archive and the annotation archive
            dataset_type = "train" if train else "val"
            zip_url = "{COCO_IMAGE_ZIP_ROOT}/{dataset_type}{year}.zip".format(
                COCO_IMAGE_ZIP_ROOT=COCO_IMAGE_ZIP_ROOT,
                dataset_type=dataset_type,
                year=year,
            )
            zip_path = os.path.join(root, "images.zip")
            annotation_url = (
                "{COCO_ANNOTATION_ZIP_ROOT}/annotations_trainval{year}.zip".format(
                    COCO_ANNOTATION_ZIP_ROOT=COCO_ANNOTATION_ZIP_ROOT, year=year
                )
            )
            annotation_zip_path = os.path.join(root, "annotation.zip")
            os.makedirs(root, exist_ok=True)
            print("Downloading coco dataset")
            print("Downloading image files...")
            request.urlretrieve(zip_url, zip_path)
            print("Unzipping image files...")
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(root)
            print("Downloading annotations files...")
            request.urlretrieve(annotation_url, annotation_zip_path)
            print("Unzipping annotation files...")
            with zipfile.ZipFile(annotation_zip_path, "r") as zip_ref:
                zip_ref.extractall(root)
        elif not os.path.isdir(root):
            raise ValueError(
                f"Coco Dataset Path {root} does not exist. Please download dataset."
            )
        yolo_preprocess = preprocessing_type == "yolo"
        # build the transform pipeline; every entry maps
        # (image, annotations) -> (image, annotations)
        trans = [
            # process annotations
            lambda img, ann: (
                img,
                _extract_bounding_box_and_labels(img, ann, yolo_preprocess),
            ),
        ]
        if rand_trans:
            # add random crop, flip, and jitter to pipeline
            jitter_fn = ColorJitter(
                brightness=0.125, contrast=0.5, saturation=0.5, hue=0.05
            )
            trans.extend(
                [
                    # Random cropping as implemented in SSD paper
                    ssd_random_crop_image_and_annotations,
                    # random horizontal flip
                    random_horizontal_flip_image_and_annotations,
                    # image color jitter
                    lambda img, ann: (jitter_fn(img), ann),
                ]
            )
        trans.extend(
            [
                # resize image
                lambda img, ann: (
                    torch_functional.resize(img, (image_size, image_size)),
                    ann,
                ),
                # Convert image to tensor
                lambda img, ann: (torch_functional.to_tensor(img), ann),
            ]
        )
        # Normalize image except for yolo preprocessing
        if not yolo_preprocess:
            trans.append(
                lambda img, ann: (
                    torch_functional.normalize(
                        img, IMAGENET_RGB_MEANS, IMAGENET_RGB_STDS
                    ),
                    ann,
                )
            )
        if preprocessing_type == "ssd":
            default_boxes = default_boxes or get_default_boxes_300()
            # encode the bounding boxes and labels with the default boxes
            trans.append(
                lambda img, ann: (
                    img,
                    (
                        *default_boxes.encode_image_box_labels(*ann),
                        ann,
                    ),  # encoded_boxes, encoded_labels, original_annotations
                )
            )
        elif yolo_preprocess:
            trans.append(
                lambda img, ann: (
                    img,
                    (bounding_box_and_labels_to_yolo_fmt(ann), ann),
                )
            )
        super().__init__(
            root=data_path,
            annFile=annotation_path,
            transforms=AnnotatedImageTransforms(trans),
        )
        self._default_boxes = default_boxes

    # NOTE(review): presumably declared as a ``@property`` upstream (the
    # decorator appears stripped in this copy) -- confirm against callers.
    def default_boxes(self) -> DefaultBoxes:
        """
        :return: The DefaultBoxes object used to encode this datasets bounding boxes
        """
        return self._default_boxes
key=["coco_2017_yolo", "coco_detection_yolo", "coco_yolo"],
attributes={
"transform_means": IMAGENET_RGB_MEANS,
"transform_stds": IMAGENET_RGB_STDS,
"num_classes": 80,
},
The provided code snippet includes necessary dependencies for implementing the `coco_2017_yolo` function. Write a Python function `def coco_2017_yolo( root: str = default_dataset_path("coco-detection"), train: bool = False, rand_trans: bool = False, download: bool = True, year: str = "2017", image_size: int = 640, preprocessing_type: str = "yolo", )` to solve the following problem:
Wrapper for COCO detection dataset with Dataset Registry values properly created for a Yolo model trained on 80 classes. :param root: The root folder to find the dataset at, if not found will download here if download=True :param train: True if this is for the training distribution, False for the validation :param rand_trans: True to apply RandomCrop and RandomHorizontalFlip to the data, False otherwise :param download: True to download the dataset, False otherwise. :param year: Only valid option is 2017. default is 2017. :param image_size: the size of the image to output from the dataset :param preprocessing_type: Type of standard pre-processing to perform. Only valid option is 'yolo'. Default is 'yolo'
Here is the function:
def coco_2017_yolo(
    root: str = default_dataset_path("coco-detection"),
    train: bool = False,
    rand_trans: bool = False,
    download: bool = True,
    year: str = "2017",
    image_size: int = 640,
    preprocessing_type: str = "yolo",
):
    """
    Wrapper for COCO detection dataset with Dataset Registry values properly
    created for a Yolo model trained on 80 classes.

    :param root: The root folder to find the dataset at, if not found will
        download here if download=True
    :param train: True if this is for the training distribution,
        False for the validation
    :param rand_trans: True to apply RandomCrop and RandomHorizontalFlip to the
        data, False otherwise
    :param download: True to download the dataset, False otherwise.
    :param year: Only valid option is 2017. default is 2017.
    :param image_size: the size of the image to output from the dataset
    :param preprocessing_type: Type of standard pre-processing to perform.
        Only valid option is 'yolo'. Default is 'yolo'
    :raises ValueError: if preprocessing_type is not 'yolo' or year is not 2017
    """
    if preprocessing_type != "yolo":
        # BUG FIX: was ``"...".foramt(...)`` which raised AttributeError
        # instead of the intended ValueError with this message
        raise ValueError(
            "Only valid preprocessing type for Coco 2017 Yolo dataset is 'yolo'"
            " received: {}".format(preprocessing_type)
        )
    if int(year) != 2017:
        # BUG FIX: same ``.foramt`` typo as above
        raise ValueError(
            "Only valid year type for Coco 2017 Yolo dataset is 2017"
            " received: {}".format(year)
        )
    return CocoDetectionDataset(
        root, train, rand_trans, download, year, image_size, "yolo"
    )
21,247 | import os
import torch
import urllib.request as request
import zipfile
from sparseml.pytorch.datasets.detection.helpers import (
AnnotatedImageTransforms,
bounding_box_and_labels_to_yolo_fmt,
random_horizontal_flip_image_and_annotations,
ssd_random_crop_image_and_annotations,
)
from sparseml.pytorch.datasets.registry import DatasetRegistry
from sparseml.pytorch.utils import DefaultBoxes, get_default_boxes_300
from sparseml.utils.datasets import (
IMAGENET_RGB_MEANS,
IMAGENET_RGB_STDS,
default_dataset_path,
)
# COCO's raw annotations use the original 90-category id space while detection
# models are trained on the 80 populated categories. Build the raw-id ->
# contiguous [0, 80) mapping by enumerating the 80 ids actually in use.
_COCO_UNUSED_CATEGORY_IDS = frozenset(
    {12, 26, 29, 30, 45, 66, 68, 69, 71, 83}
)
_COCO_CLASSES_90_to_80 = {
    coco_id: contiguous_id
    for contiguous_id, coco_id in enumerate(
        cid for cid in range(1, 91) if cid not in _COCO_UNUSED_CATEGORY_IDS
    )
}


def _extract_bounding_box_and_labels(image, annotations, yolo_preprocess=False):
    """
    Convert raw COCO annotations into (boxes, labels) targets where boxes are
    in ltrb format scaled to [0, 1]. When ``yolo_preprocess`` is True, labels
    are remapped to the contiguous 80-class space and annotations with unused
    category ids are dropped.
    """
    box_rows = []
    label_ids = []
    for annotation in annotations:
        category = int(annotation["category_id"])
        if yolo_preprocess:
            remapped = _COCO_CLASSES_90_to_80.get(category)
            if remapped is None:
                # category id not used by the 80-class models; skip it
                continue
            category = remapped
        box_rows.append(annotation["bbox"])
        label_ids.append(category)

    boxes = torch.FloatTensor(box_rows)
    labels = torch.Tensor(label_ids).long()
    if boxes.numel() == 0:
        return boxes, labels

    # convert boxes from ltwh to ltrb
    boxes[:, 2] = boxes[:, 0] + boxes[:, 2]  # r = l + w
    boxes[:, 3] = boxes[:, 1] + boxes[:, 3]  # b = t + h
    # normalize coordinates to [0, 1]
    boxes[:, [0, 2]] /= image.width
    boxes[:, [1, 3]] /= image.height
    return boxes, labels
21,248 | import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
from sparseml.pytorch.utils import ssd_random_crop
The provided code snippet includes necessary dependencies for implementing the `ssd_random_crop_image_and_annotations` function. Write a Python function `def ssd_random_crop_image_and_annotations( image: Image.Image, annotations: Tuple[Tensor, Tensor] ) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]` to solve the following problem:
Wraps sparseml.pytorch.utils.ssd_random_crop to work in the AnnotatedImageTransforms pipeline. :param image: the image to crop :param annotations: a tuple of bounding boxes and their labels for this image :return: A tuple of the cropped image and annotations
Here is the function:
def ssd_random_crop_image_and_annotations(
    image: Image.Image, annotations: Tuple[Tensor, Tensor]
) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]:
    """
    Adapter that runs sparseml.pytorch.utils.ssd_random_crop inside the
    AnnotatedImageTransforms pipeline.

    :param image: the image to crop
    :param annotations: a tuple of bounding boxes and their labels for this image
    :return: A tuple of the cropped image and annotations
    """
    boxes, labels = annotations
    if labels.numel() == 0:
        # no annotated objects to crop against; pass the sample through
        return image, (boxes, labels)
    image, boxes, labels = ssd_random_crop(image, boxes, labels)
    return image, (boxes, labels)
21,249 | import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
from sparseml.pytorch.utils import ssd_random_crop
The provided code snippet includes necessary dependencies for implementing the `random_horizontal_flip_image_and_annotations` function. Write a Python function `def random_horizontal_flip_image_and_annotations( image: Image.Image, annotations: Tuple[Tensor, Tensor], p: float = 0.5 ) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]` to solve the following problem:
Performs a horizontal flip on given image and bounding boxes with probability p. :param image: the image to randomly flip :param annotations: a tuple of bounding boxes and their labels for this image :param p: the probability to flip with. Default is 0.5 :return: A tuple of the randomly flipped image and annotations
Here is the function:
def random_horizontal_flip_image_and_annotations(
    image: Image.Image, annotations: Tuple[Tensor, Tensor], p: float = 0.5
) -> Tuple[Image.Image, Tuple[Tensor, Tensor]]:
    """
    Performs a horizontal flip on given image and bounding boxes with probability p.

    :param image: the image to randomly flip
    :param annotations: a tuple of bounding boxes and their labels for this image
    :param p: the probability to flip with. Default is 0.5
    :return: A tuple of the randomly flipped image and annotations
    """
    if torchvision_import_error is not None:
        raise torchvision_import_error
    boxes, labels = annotations
    should_flip = random.random() < p
    if should_flip:
        if labels.numel() > 0:
            # mirror the l/r coordinates: new_l = 1 - old_r, new_r = 1 - old_l
            boxes[:, [0, 2]] = 1.0 - boxes[:, [2, 0]]
        image = torchvision_functional.hflip(image)
    return image, (boxes, labels)
21,250 | import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
from sparseml.pytorch.utils import ssd_random_crop
The provided code snippet includes necessary dependencies for implementing the `yolo_collate_fn` function. Write a Python function `def yolo_collate_fn( batch: List[Any], ) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]` to solve the following problem:
Collate function to be used for creating a DataLoader with values for Yolo model input. :param batch: a batch of data points and annotations transformed by bounding_box_and_labels_to_yolo_fmt :return: the batch stacked as tensors for all values except for the original annotations
Here is the function:
def yolo_collate_fn(
    batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]:
    """
    Collate function to be used for creating a DataLoader with values for Yolo
    model input.

    :param batch: a batch of data points and annotations transformed by
        bounding_box_and_labels_to_yolo_fmt
    :return: the batch stacked as tensors for all values except for the
        original annotations
    """
    image_stack = []
    target_rows = []
    raw_annotations = []
    for sample_idx, (image, (target, annotation)) in enumerate(batch):
        image_stack.append(image)
        # prepend the in-batch image index so per-image targets can be
        # flattened into one tensor
        index_col = torch.full((target.size(0), 1), float(sample_idx))
        target_rows.append(torch.cat((index_col, target), 1))
        raw_annotations.append(annotation)
    return torch.stack(image_stack, 0), (torch.cat(target_rows, 0), raw_annotations)
21,251 | import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
from sparseml.pytorch.utils import ssd_random_crop
The provided code snippet includes necessary dependencies for implementing the `ssd_collate_fn` function. Write a Python function `def ssd_collate_fn( batch: List[Any], ) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]` to solve the following problem:
Collate function to be used for creating a DataLoader with values transformed by encode_annotation_bounding_boxes. :param batch: a batch of data points transformed by encode_annotation_bounding_boxes :return: the batch stacked as tensors for all values except for the original annotations
Here is the function:
def ssd_collate_fn(
    batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]:
    """
    Collate function to be used for creating a DataLoader with values
    transformed by encode_annotation_bounding_boxes.

    :param batch: a batch of data points transformed by
        encode_annotation_bounding_boxes
    :return: the batch stacked as tensors for all values except for the
        original annotations
    """
    image_stack = []
    box_stack = []
    label_stack = []
    raw_annotations = []
    for image, (enc_box, enc_label, annotation) in batch:
        image_stack.append(image)
        box_stack.append(enc_box)
        label_stack.append(enc_label)
        raw_annotations.append(annotation)
    return (
        torch.stack(image_stack, 0),
        (torch.stack(box_stack, 0), torch.stack(label_stack, 0), raw_annotations),
    )
21,252 | import random
from typing import Any, Callable, List, Tuple
import torch
from PIL import Image
from torch import Tensor
from sparseml.pytorch.utils import ssd_random_crop
def bounding_box_and_labels_to_yolo_fmt(annotations):
    """
    Convert (ltrb boxes, labels) into yolo target rows [label, cx, cy, w, h].
    Returns an empty (0, 5) tensor when there are no boxes.
    """
    boxes, labels = annotations
    if boxes.numel() == 0:
        return torch.zeros(0, 5)
    lefts, tops, rights, bottoms = (
        boxes[:, 0],
        boxes[:, 1],
        boxes[:, 2],
        boxes[:, 3],
    )
    centers_x = (lefts + rights) / 2
    centers_y = (tops + bottoms) / 2
    widths = rights - lefts
    heights = bottoms - tops
    return torch.stack((labels, centers_x, centers_y, widths, heights)).T
21,253 | from typing import Any, Callable, List, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
from sparseml.optim import (
PruningLossSensitivityAnalysis,
default_pruning_sparsities_loss,
)
from sparseml.pytorch.optim.mask_creator_pruning import UnstructuredPruningMaskCreator
from sparseml.pytorch.optim.mask_pruning import ModuleParamPruningMask
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
BaseLogger,
LossWrapper,
ModuleRunFuncs,
ModuleTester,
get_prunable_layers,
infinite_data_loader,
)
The provided code snippet includes necessary dependencies for implementing the `model_prunability_magnitude` function. Write a Python function `def model_prunability_magnitude(module: Module)` to solve the following problem:
Calculate the approximate sensitivity for an overall model. Range of the values are not scaled to anything, so must be taken in context with other known models. :param module: the model to calculate the sensitivity for :return: the approximated sensitivity
Here is the function:
def model_prunability_magnitude(module: Module):
    """
    Calculate the approximate sensitivity for an overall model.
    Range of the values are not scaled to anything, so must be taken in context
    with other known models.

    :param module: the model to calculate the sensitivity for
    :return: the approximated sensitivity (mean absolute weight magnitude over
        all prunable layers)
    """
    magnitudes = [
        getattr(layer, "weight").view(-1).abs()
        for _, layer in get_prunable_layers(module)
    ]
    return torch.cat(magnitudes).mean().item()
21,254 | from typing import Any, Callable, List, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
from sparseml.optim import (
PruningLossSensitivityAnalysis,
default_pruning_sparsities_loss,
)
from sparseml.pytorch.optim.mask_creator_pruning import UnstructuredPruningMaskCreator
from sparseml.pytorch.optim.mask_pruning import ModuleParamPruningMask
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
BaseLogger,
LossWrapper,
ModuleRunFuncs,
ModuleTester,
get_prunable_layers,
infinite_data_loader,
)
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sens_magnitude` function. Write a Python function `def pruning_loss_sens_magnitude( module: Module, sparsity_levels: Union[ List[float], Tuple[float, ...] ] = default_pruning_sparsities_loss(True), ) -> PruningLossSensitivityAnalysis` to solve the following problem:
Approximated kernel sparsity (pruning) loss analysis for a given model. Returns the results for each prunable param (conv, linear) in the model. :param module: the model to calculate the sparse sensitivity analysis for :param sparsity_levels: the sparsity levels to calculate the loss for each param :return: the analysis results for the model
Here is the function:
def pruning_loss_sens_magnitude(
    module: Module,
    sparsity_levels: Union[
        List[float], Tuple[float, ...]
    ] = default_pruning_sparsities_loss(True),
) -> PruningLossSensitivityAnalysis:
    """
    Approximated kernel sparsity (pruning) loss analysis for a given model.
    Returns the results for each prunable param (conv, linear) in the model.

    The "loss" recorded for each sparsity level is the mean magnitude of the
    weights that would be newly pruned when moving to that level — a cheap
    proxy that requires no forward passes.

    :param module: the model to calculate the sparse sensitivity analysis for
    :param sparsity_levels: the sparsity levels to calculate the loss for each param
    :return: the analysis results for the model
    """
    prunable = get_prunable_layers(module)
    analysis = PruningLossSensitivityAnalysis()

    for index, (name, layer) in enumerate(prunable):
        weight = getattr(layer, "weight")
        name = "{}.weight".format(name)
        # sort magnitudes ascending so a sparsity fraction maps to a prefix
        values, _ = weight.view(-1).abs().sort()
        prev_index = 0

        for sparsity in sparsity_levels:
            # index just past the weights pruned at this sparsity level
            val_index = round(sparsity * len(values))
            if val_index >= len(values):
                val_index = len(values) - 1

            if sparsity <= 1e-9:
                # treat (near) zero sparsity as the unpruned baseline
                baseline = True
                sparsity = 0.0
                sparse_avg = 0.0
            else:
                baseline = False
                if val_index > prev_index:
                    # mean magnitude of weights newly pruned since the previous level
                    sparse_avg = values[prev_index:val_index].mean().item()
                    prev_index = val_index
                else:
                    # no new weights pruned at this level; use the boundary value
                    sparse_avg = values[val_index].item()
                    prev_index = val_index + 1

            analysis.add_result(None, name, index, sparsity, sparse_avg, baseline)

    return analysis
21,255 | from typing import Any, Callable, List, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
from sparseml.optim import (
PruningLossSensitivityAnalysis,
default_pruning_sparsities_loss,
)
from sparseml.pytorch.optim.mask_creator_pruning import UnstructuredPruningMaskCreator
from sparseml.pytorch.optim.mask_pruning import ModuleParamPruningMask
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
BaseLogger,
LossWrapper,
ModuleRunFuncs,
ModuleTester,
get_prunable_layers,
infinite_data_loader,
)
def _sensitivity_callback(
    prunable_layers: List[Tuple[str, Module]],
    sparsity_levels: List[int],
    steps_per_measurement: int,
    analysis: PruningLossSensitivityAnalysis,
    loss_key: str,
) -> Callable:
    """
    Build a batch-end hook that sweeps pruning masks over every prunable layer.

    The returned callback counts batches; after ``steps_per_measurement``
    batches it advances to the next sparsity level (and, once all levels for a
    layer are done, to the next layer), recording the observed loss for each
    (layer, sparsity) pair into ``analysis``.
    """
    # closure state shared between complete_measurement and batch_end
    measurement_steps = 0
    layer_index = -1
    sparsity_index = -1
    current_mask = None

    def complete_measurement():
        """
        Uses complete_measurement to handle when all of the required steps have been
        taken for a given layer and sparsity level.
        This handles incrementing to the next sparsity level.
        If all sparsity levels are complete,
        increments to the next layer and starts from the initial sparsity level.
        Should only be invoked when all measurements have been taken.
        """
        nonlocal measurement_steps
        nonlocal layer_index
        nonlocal sparsity_index
        nonlocal current_mask

        measurement_steps = 0
        sparsity_index += 1

        if 0 <= sparsity_index < len(sparsity_levels) and 0 <= layer_index < len(
            prunable_layers
        ):
            # increment sparsity level for current layer
            current_mask.set_param_masks_from_sparsity(sparsity_levels[sparsity_index])
        else:
            # go to next layer
            sparsity_index = 0
            layer_index += 1

            if current_mask:
                # restore the previous layer's original weights before moving on
                current_mask.enabled = False
                current_mask.reset()
                del current_mask
                current_mask = None

            if layer_index < len(prunable_layers):
                current_mask = ModuleParamPruningMask(
                    [prunable_layers[layer_index][1]],
                    store_init=True,
                    mask_creator=UnstructuredPruningMaskCreator(),
                )
                current_mask.enabled = True

                if sparsity_levels[sparsity_index] > 0.0:
                    current_mask.set_param_masks_from_sparsity(
                        sparsity_levels[sparsity_index]
                    )

    # prime the state so the first batch measures the first layer/level
    complete_measurement()

    def batch_end(
        epoch: int,
        step: int,
        batch_size: int,
        data: Any,
        pred: Any,
        losses: Any,
    ):
        # record this batch's loss against the active (layer, sparsity) pair,
        # then advance the sweep once enough batches have been measured
        nonlocal measurement_steps
        measurement_steps += 1

        if layer_index < len(prunable_layers):
            analysis.add_result(
                None,
                "{}.weight".format(prunable_layers[layer_index][0]),
                sparsity_index,
                sparsity_levels[sparsity_index],
                losses[loss_key].item(),
                baseline=sparsity_levels[sparsity_index] < 1e-9,
            )

        if measurement_steps >= steps_per_measurement:
            complete_measurement()

    return batch_end
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sens_one_shot` function. Write a Python function `def pruning_loss_sens_one_shot( module: Module, data: DataLoader, loss: Union[LossWrapper, Callable[[Any, Any], Tensor]], device: str, steps_per_measurement: int, sparsity_levels: List[int] = default_pruning_sparsities_loss(False), loss_key: str = DEFAULT_LOSS_KEY, tester_run_funcs: ModuleRunFuncs = None, tester_loggers: List[BaseLogger] = None, show_progress: bool = True, ) -> PruningLossSensitivityAnalysis` to solve the following problem:
Run a one shot sensitivity analysis for kernel sparsity. It does not retrain, and instead puts the model to eval mode. Moves layer by layer to calculate the sensitivity analysis for each and resets the previously run layers. Note, by default it caches the data. This means it is not parallel for data loading and the first run can take longer. Subsequent sparsity checks for layers and levels will be much faster. :param module: the module to run the kernel sparsity sensitivity analysis over will extract all prunable layers out :param data: the data to run through the module for calculating the sensitivity analysis :param loss: the loss function to use for the sensitivity analysis :param device: the device to run the analysis on; ex: cpu, cuda :param steps_per_measurement: the number of samples or items to take for each measurement at each sparsity level :param sparsity_levels: the sparsity levels to check for each layer to calculate sensitivity :param loss_key: the key for the loss function to track in the returned dict :param tester_run_funcs: override functions to use in the ModuleTester that runs :param tester_loggers: loggers to log data to while running the analysis :param show_progress: track progress of the runs if True :return: the sensitivity results for every layer that is prunable
Here is the function:
def pruning_loss_sens_one_shot(
    module: Module,
    data: DataLoader,
    loss: Union[LossWrapper, Callable[[Any, Any], Tensor]],
    device: str,
    steps_per_measurement: int,
    sparsity_levels: List[int] = default_pruning_sparsities_loss(False),
    loss_key: str = DEFAULT_LOSS_KEY,
    tester_run_funcs: ModuleRunFuncs = None,
    tester_loggers: List[BaseLogger] = None,
    show_progress: bool = True,
) -> PruningLossSensitivityAnalysis:
    """
    Run a one shot sensitivity analysis for kernel sparsity.
    It does not retrain, and instead puts the model to eval mode.
    Moves layer by layer to calculate the sensitivity analysis for each and
    resets the previously run layers.
    Note, by default it caches the data.
    This means it is not parallel for data loading and the first run can take longer.
    Subsequent sparsity checks for layers and levels will be much faster.

    :param module: the module to run the kernel sparsity sensitivity analysis over
        will extract all prunable layers out
    :param data: the data to run through the module for calculating the sensitivity
        analysis
    :param loss: the loss function to use for the sensitivity analysis
    :param device: the device to run the analysis on; ex: cpu, cuda
    :param steps_per_measurement: the number of samples or items to take for each
        measurement at each sparsity level
    :param sparsity_levels: the sparsity levels to check for each layer to calculate
        sensitivity
    :param loss_key: the key for the loss function to track in the returned dict
    :param tester_run_funcs: override functions to use in the ModuleTester that runs
    :param tester_loggers: loggers to log data to while running the analysis
    :param show_progress: track progress of the runs if True
    :return: the sensitivity results for every layer that is prunable
    """
    analysis = PruningLossSensitivityAnalysis()
    tester = ModuleTester(
        module,
        device,
        loss,
        loggers=tester_loggers,
        log_summary=False,
        log_steps=max(1, round(steps_per_measurement / 10)),
    )
    layers = get_prunable_layers(module)
    # hook advances pruning masks layer-by-layer and records losses into analysis
    batch_end = _sensitivity_callback(
        layers, sparsity_levels, steps_per_measurement, analysis, loss_key
    )
    batch_end_hook = tester.run_hooks.register_batch_end_hook(batch_end)
    if tester_run_funcs is not None:
        tester.run_funcs.copy(tester_run_funcs)

    # cache=True so repeated passes over the same data avoid reload cost
    data_loader = infinite_data_loader(
        data, early_stop_steps=steps_per_measurement, cache=True
    )
    tester.run(
        data_loader,
        desc="KS Analysis",
        show_progress=show_progress,
        track_results=False,
        max_steps=steps_per_measurement * len(sparsity_levels) * len(layers),
    )
    batch_end_hook.remove()

    return analysis
21,256 | from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Parameter
class PruningParamsScorer(ABC):
    """
    Base abstract class for scoring model parameters for pruning

    :param params: list of model Parameters to track and score
    """

    def __init__(self, params: List[Parameter]):
        self._params = params
        # sparsity level (decimal fraction of zeros) most recently applied
        self._last_applied_sparsity = 0.0

    def score_parameters(self) -> List[Tensor]:
        """
        :return: List of Tensors the same shapes as the given Parameters that
            correspond to their scores to be pruned by
        """
        raise NotImplementedError()

    def pre_optim_step_update(self, masks: List[Tensor]):
        """
        Perform any required logic for tracking Parameter data and gradients before
        an Optimizer step is applied to the model.

        :param masks: latest masks that are applied to these parameters
        """
        pass

    def mask_update(self, masks: List[Tensor], mask_diffs: List[Tensor]):
        """
        Perform any updates based on the latest mask to be applied to the weights
        immediately after this function completes

        :param masks: latest masks to be applied to these parameters
        :param mask_diffs: mask diff values returned by mask_difference for these
            masks that describe how these masks changed since the last update
        """
        pass

    def update_last_applied_sparsity(self, sparsity: float):
        """
        :param sparsity: sparsity level between 0.0 and 1.0 that was the last value
            set for the given parameters
        """
        self._last_applied_sparsity = sparsity

    def check_regen_param_vals(self):
        """
        Check that all variables based on the params are on the correct device
        and regenerate if not
        """
        pass

    def on_pruning_end(self):
        """
        Perform any cleanup after pruning is complete
        """
        pass

    @staticmethod
    def get_name() -> str:
        """
        :return: name of this pruning method

        FIX: declared as @staticmethod — previously this was a plain function
        with no ``self``, so calling it on an instance raised TypeError instead
        of the intended NotImplementedError; class-level calls (as done by the
        scorer registry) only worked by accident.
        """
        raise NotImplementedError()
# Registry of concrete PruningParamsScorer implementations; keyed at runtime by
# each class's get_name() inside create_pruning_param_scorer.
# NOTE: "AVALIABLE" is a long-standing typo preserved for backward compatibility
# with any code importing this name.
AVALIABLE_SCORER_CLASSES = [
    MagnitudePruningParamsScorer,
    MovementPruningParamsScorer,
]
The provided code snippet includes necessary dependencies for implementing the `create_pruning_param_scorer` function. Write a Python function `def create_pruning_param_scorer( params: List[Parameter], score_type: str ) -> PruningParamsScorer` to solve the following problem:
:param params: List of Parameters for the created PruningParamsScorer to track :param score_type: String name of scoring type to use. Valid options are 'magnitude', or 'movement'
Here is the function:
def create_pruning_param_scorer(
    params: List[Parameter], score_type: str
) -> PruningParamsScorer:
    """
    Create a PruningParamsScorer for the given parameters by scorer name.

    :param params: List of Parameters for the created PruningParamsScorer to track
    :param score_type: String name of scoring type to use. Valid options are
        'magnitude', or 'movement'
    :return: constructed scorer instance tracking ``params``
    :raises ValueError: if score_type is not a string or is not a known scorer name
    """
    scorer_name_to_constructor = {
        scorer.get_name(): scorer for scorer in AVALIABLE_SCORER_CLASSES
    }

    if not isinstance(score_type, str):
        # FIX: corrected typo "Recieved" -> "Received" in the error message
        raise ValueError(
            f"Received unsupported type for score_type: {type(score_type)} "
            "expected string"
        )

    if score_type not in scorer_name_to_constructor:
        raise ValueError(
            f"Invalid score_type {score_type}. Valid score types include "
            f"{list(scorer_name_to_constructor.keys())}"
        )

    return scorer_name_to_constructor[score_type](params)
21,257 | from typing import Any, Callable, List, Tuple, Union
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from sparseml.optim import LRLossSensitivityAnalysis
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
BaseLogger,
LossWrapper,
ModuleRunFuncs,
ModuleRunResults,
ModuleTrainer,
infinite_data_loader,
set_optim_learning_rate,
)
def default_exponential_check_lrs(
    init_lr: float = 1e-6, final_lr: float = 0.5, lr_mult: float = 1.1
) -> Tuple[float, ...]:
    """
    Build the default exponential sweep of learning rates from init_lr up to
    final_lr, multiplying by lr_mult at each step.

    :param init_lr: the initial learning rate in the returned list
    :param final_lr: the final learning rate in the returned list
    :param lr_mult: the multiplier increase for each step between
        init_lr and final_lr
    :return: tuple of lrs growing exponentially from init_lr, ending with an
        explicit final_lr entry
    """
    lrs = [init_lr]
    current = init_lr
    # grow by lr_mult until we reach or pass final_lr
    while current < final_lr:
        current = current * lr_mult
        lrs.append(current)
    # always terminate with final_lr itself (NOTE: the element before it may
    # slightly overshoot final_lr — this matches the original behavior)
    lrs.append(final_lr)
    return tuple(lrs)
def _sensitivity_callback(
    check_lrs: Union[List[float], Tuple[float, ...]],
    steps_per_measurement: int,
    optim: Optimizer,
    analysis: LRLossSensitivityAnalysis,
    loss_key: str,
) -> Tuple[Callable, Callable]:
    # Build the (batch_end, completed) hook pair that drives the LR sweep:
    # every steps_per_measurement batches, flush the accumulated losses into
    # `analysis` for the current LR and advance the optimizer to the next LR.
    measurement_steps = 0
    check_index = -1  # starts at -1 so the initial complete_lr() records nothing
    lr_results = None

    def complete_lr():
        # Record results for the LR just measured (if any) and move to the next LR.
        nonlocal measurement_steps
        nonlocal check_index
        nonlocal lr_results

        if measurement_steps > 0 and check_index >= 0 and check_index < len(check_lrs):
            lr_res = [res.item() for res in lr_results.result_list_tensor(loss_key)]
            analysis.add_result(check_lrs[check_index], lr_res)

        measurement_steps = 0
        check_index += 1
        lr_results = ModuleRunResults()

        if check_index < len(check_lrs):
            set_optim_learning_rate(optim, check_lrs[check_index])

    complete_lr()  # initial to set the lr

    def batch_end(
        epoch: int,
        step: int,
        batch_size: int,
        data: Any,
        pred: Any,
        losses: Any,
    ):
        # Invoked after every training batch by the trainer's batch-end hook.
        nonlocal measurement_steps
        measurement_steps += 1

        if measurement_steps >= steps_per_measurement:
            complete_lr()

        # note: the batch that triggers a rollover is appended to the NEXT
        # LR's accumulator, since complete_lr() resets lr_results above
        lr_results.append(losses, batch_size)

    def completed():
        complete_lr()  # make sure we didn't miss any

    return batch_end, completed
The provided code snippet includes necessary dependencies for implementing the `lr_loss_sensitivity` function. Write a Python function `def lr_loss_sensitivity( module: Module, data: DataLoader, loss: Union[LossWrapper, Callable[[Any, Any], Tensor]], optim: Optimizer, device: str, steps_per_measurement: int, check_lrs: Union[List[float], Tuple[float, ...]] = default_exponential_check_lrs(), loss_key: str = DEFAULT_LOSS_KEY, trainer_run_funcs: ModuleRunFuncs = None, trainer_loggers: List[BaseLogger] = None, show_progress: bool = True, ) -> LRLossSensitivityAnalysis` to solve the following problem:
Implementation for handling running sensitivity analysis for learning rates on modules. :param module: the module to run the learning rate sensitivity analysis over, it is expected to already be on the correct device :param data: the data to run through the module for calculating the sensitivity analysis :param loss: the loss function to use for the sensitivity analysis :param optim: the optimizer to run the sensitivity analysis with :param device: the device to run the analysis on; ex: cpu, cuda. module must already be on that device, this is used to place then data on that same device. :param steps_per_measurement: the number of batches to run through for the analysis at each LR :param check_lrs: the learning rates to check for analysis (will sort them small to large before running) :param loss_key: the key for the loss function to track in the returned dict :param trainer_run_funcs: override functions for ModuleTrainer class :param trainer_loggers: loggers to log data to while running the analysis :param show_progress: track progress of the runs if True :return: a list of tuples containing the analyzed learning rate at 0 and the ModuleRunResults in 1, ModuleRunResults being a collection of all the batch results run through the module at that LR
Here is the function:
def lr_loss_sensitivity(
    module: Module,
    data: DataLoader,
    loss: Union[LossWrapper, Callable[[Any, Any], Tensor]],
    optim: Optimizer,
    device: str,
    steps_per_measurement: int,
    check_lrs: Union[List[float], Tuple[float, ...]] = default_exponential_check_lrs(),
    loss_key: str = DEFAULT_LOSS_KEY,
    trainer_run_funcs: ModuleRunFuncs = None,
    trainer_loggers: List[BaseLogger] = None,
    show_progress: bool = True,
) -> LRLossSensitivityAnalysis:
    """
    Implementation for handling running sensitivity analysis for
    learning rates on modules.

    :param module: the module to run the learning rate sensitivity analysis over,
        it is expected to already be on the correct device
    :param data: the data to run through the module for calculating
        the sensitivity analysis
    :param loss: the loss function to use for the sensitivity analysis
    :param optim: the optimizer to run the sensitivity analysis with
    :param device: the device to run the analysis on; ex: cpu, cuda.
        module must already be on that device, this is used to place then data
        on that same device.
    :param steps_per_measurement: the number of batches to run through for
        the analysis at each LR
    :param check_lrs: the learning rates to check for analysis
        (will sort them small to large before running)
    :param loss_key: the key for the loss function to track in the returned dict
    :param trainer_run_funcs: override functions for ModuleTrainer class
    :param trainer_loggers: loggers to log data to while running the analysis
    :param show_progress: track progress of the runs if True
    :return: a list of tuples containing the analyzed learning rate at 0
        and the ModuleRunResults in 1, ModuleRunResults being a collection
        of all the batch results run through the module at that LR
    """
    analysis = LRLossSensitivityAnalysis()
    trainer = ModuleTrainer(
        module,
        device,
        loss,
        optim,
        loggers=trainer_loggers,
        log_summary=False,
        log_steps=max(1, round(steps_per_measurement / 10)),  # ~10 log points per LR
    )
    # hook pair: batch_end flushes/advances LRs, completed records the final LR
    batch_end, completed = _sensitivity_callback(
        check_lrs, steps_per_measurement, optim, analysis, loss_key
    )
    batch_end_hook = trainer.run_hooks.register_batch_end_hook(batch_end)
    if trainer_run_funcs is not None:
        trainer.run_funcs.copy(trainer_run_funcs)

    # loop the dataset indefinitely; max_steps below bounds the total work
    data_loader = infinite_data_loader(data)
    trainer.run(
        data_loader,
        desc="LR Analysis",
        show_progress=show_progress,
        track_results=False,
        max_steps=steps_per_measurement * len(check_lrs),
    )
    completed()  # flush results for the last measured LR
    batch_end_hook.remove()

    return analysis
21,258 | import random
from abc import ABC, abstractmethod
from typing import Iterable, List, Optional, Union
import torch
from torch import Tensor
class PruningMaskCreator(ABC):
    """
    Abstract base for sparsity mask creators.

    Concrete subclasses implement the strategies for building masks; this base
    provides a default implementation deriving masks from nonzero values.
    """

    def create_sparsity_masks_from_tensor(self, tensors: List[Tensor]) -> List[Tensor]:
        """
        :param tensors: list of tensors to calculate a masks based on their values
        :return: list of masks derived from each of the given tensors
        """
        masks = []
        for tensor in tensors:
            # 1.0 wherever the value is nonzero, 0.0 elsewhere, same dtype
            masks.append(torch.ne(tensor, 0.0).type(tensor.type()))
        return masks

    def create_sparsity_masks_from_threshold(
        self, tensors: List[Tensor], threshold: Union[float, Tensor]
    ) -> List[Tensor]:
        """
        :param tensors: list of tensors to calculate a masks based on their contained
            values
        :param threshold: a threshold to determine cutoff for sparsification
        :return: list of masks derived from each of the given tensors and the threshold
        """
        raise NotImplementedError()

    def create_sparsity_masks(
        self,
        tensors: List[Tensor],
        sparsity: Union[float, List[float]],
        global_sparsity: bool = False,
    ) -> List[Tensor]:
        """
        :param tensors: list of tensors to calculate a masks based on their contained
            values
        :param sparsity: the desired sparsity to reach within the mask
            (decimal fraction of zeros) can also be a list where each element is a
            sparsity for a tensor in the same position in the tensor list. If global
            sparsity is enabled, all values of the sparsity list must be the same
        :param global_sparsity: if True, sparsity masks will be created such that the
            average sparsity across all given tensors is the target sparsity with the
            lowest global values masked. If False, each tensor will be masked to the
            target sparsity ranking values within each individual tensor. Default is
            False
        :return: list of masks (0.0 for values that are masked, 1.0 for values that are
            unmasked) calculated from the tensors such that the desired number of zeros
            matches the sparsity.
        """
        raise NotImplementedError()
class BlockPruningMaskCreator(GroupedPruningMaskCreator):
    """
    Structured sparsity mask creator that groups the input tensor into blocks of
    shape block_shape.

    :param block_shape: The shape in and out channel should take in blocks. Should be
        a list of exactly two integers that divide the input tensors evenly on the
        channel dimensions. -1 for a dimension blocks across the entire dimension
    :param grouping_fn_name: The name of the torch grouping function to reduce
        dimensions by
    """

    def __init__(
        self,
        block_shape: List[int],
        grouping_fn_name: str = "mean",
    ):
        if len(block_shape) < 2:
            raise ValueError(
                (
                    "Invalid block_shape: {}, "
                    "block_shape must have length == 2 for in and out channels"
                ).format(block_shape)
            )
        if len(block_shape) > 2 and not all([shape == 1 for shape in block_shape[2:]]):
            # after in and out channels, only 1 can be used for other dimensions
            raise ValueError(
                (
                    "Invalid block_shape: {}, "
                    "block_shape for indices not in [0, 1] must be equal to 1"
                ).format(block_shape)
            )
        self._block_shape = block_shape
        self._grouping_fn_name = grouping_fn_name

    def group_tensor(self, tensor: Tensor) -> Tensor:
        """
        :param tensor: The tensor to transform
        :return: The mean values of the tensor grouped by blocks of shape
            self._block_shape
        """
        blocked_tens_shape = self._get_blocked_tens_shape_and_validate(tensor.shape)
        blocked_tensor = tensor.reshape(blocked_tens_shape)
        reduced_blocks = GroupedPruningMaskCreator.reduce_tensor(
            blocked_tensor, 1, self._grouping_fn_name
        )
        return reduced_blocks.type(tensor.type())

    def _map_mask_to_tensor(
        self,
        grouped_mask: Tensor,
        original_tensor_shape: torch.Size,
        tensor_idx: Optional[int] = None,
    ) -> Tensor:
        """
        :param grouped_mask: A binary mask the size of a tensor from group_tensor
        :param original_tensor_shape: Shape of the original tensor grouped_mask
            derives from
        :param tensor_idx: optional index this tensor was passed into a tensor
            list for mask creation
        :return: The values from grouped_mask mapped to a tensor of size
            original_tensor_shape
        """
        blocked_tens_shape = self._get_blocked_tens_shape_and_validate(
            original_tensor_shape
        )
        # expand so every element has a corresponding value in the original tensor
        block_mask = grouped_mask.reshape(blocked_tens_shape[0], blocked_tens_shape[2])
        block_mask = block_mask.unsqueeze(1)
        block_mask = block_mask.expand(*blocked_tens_shape).contiguous()
        return block_mask.reshape(original_tensor_shape)

    def _get_blocked_tens_shape_and_validate(
        self,
        tens_shape: torch.Size,
    ) -> List[int]:
        """
        :param tens_shape: The shape of the tensor to group in blocks
        :return: shape of tens when blocked by block_shape
        :raise: ValueError if we are unable to block tens by shape block_shape
        """
        # FIX: work on a copy — the previous implementation aliased
        # self._block_shape and mutated it in place (padding with 1s and
        # resolving -1 entries), permanently corrupting the creator's state
        # for subsequent tensors with different shapes.
        block_shape = list(self._block_shape)
        n_dims = len(tens_shape)

        while len(block_shape) < n_dims:  # Conv will have block shape [X, Y, 1, ..., 1]
            block_shape.append(1)
        for idx, shape in enumerate(block_shape):
            if shape == -1:
                # -1 means block across this entire dimension
                block_shape[idx] = tens_shape[idx]

        # Validate
        if n_dims < 2:
            raise ValueError(
                "Invalid tensor shape {}."
                " BlockSparsityMaskCreator can only create masks from tensors with 2 or"
                " more dimensions, tensor has {}.".format(tens_shape, n_dims)
            )
        for tens_dim, block_dim in zip(tens_shape, block_shape):
            if tens_dim % block_dim != 0:
                raise ValueError(
                    f"Invalid block_shape {block_shape} for parameter shape "
                    f"{tens_shape}. Elements of block_shape must divide parameter "
                    f"shape evenly"
                )

        # Compute blocked tensor shape: (num_blocks, block_size, -1)
        if len(block_shape) > 1 and block_shape[1] > 1:
            return [
                tens_shape[0] * tens_shape[1] // (block_shape[0] * block_shape[1]),
                block_shape[0] * block_shape[1],
                -1,
            ]
        else:
            return [tens_shape[0] // block_shape[0], block_shape[0], -1]

    def __str__(self):
        return str(self._block_shape)

    def __repr__(self):
        return str(self)
# Maps canonical mask-creator names to zero-argument constructors; used by
# load_mask_creator to resolve string identifiers. Lambdas defer construction
# until a creator is actually requested.
mask_creator_name_to_constructor_lambda = {
    "unstructured": lambda: UnstructuredPruningMaskCreator(),
    "channel": lambda: DimensionSparsityMaskCreator("channel"),
    "filter": lambda: DimensionSparsityMaskCreator("filter"),
    "block": lambda: FourBlockMaskCreator(),
}
The provided code snippet includes necessary dependencies for implementing the `load_mask_creator` function. Write a Python function `def load_mask_creator(obj: Union[str, Iterable[int]]) -> PruningMaskCreator` to solve the following problem:
:param obj: Formatted string or block shape iterable specifying SparsityMaskCreator object to return :return: SparsityMaskCreator object created from obj
Here is the function:
def load_mask_creator(obj: Union[str, Iterable[int]]) -> PruningMaskCreator:
"""
:param obj: Formatted string or block shape iterable specifying SparsityMaskCreator
object to return
:return: SparsityMaskCreator object created from obj
"""
if isinstance(obj, str) and obj in mask_creator_name_to_constructor_lambda:
return mask_creator_name_to_constructor_lambda[obj]()
# Checking for a BlockSparsityMaskCreator string
if ("[" in obj and "]" in obj) or ("(" in obj and ")" in obj):
stripped_str = obj.strip("[|]|(|)")
block_shape = [int(s) for s in stripped_str.split(",")]
return BlockPruningMaskCreator(block_shape)
if isinstance(obj, list) or isinstance(obj, tuple):
return BlockPruningMaskCreator(obj)
raise ValueError(
"Invalid mask type string: {}, could not map to an object".format(obj)
) | :param obj: Formatted string or block shape iterable specifying SparsityMaskCreator object to return :return: SparsityMaskCreator object created from obj |
21,259 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.pytorch.base import check_torch_install, torch
from sparseml.pytorch.sparsification import sparsification_info
from sparseml.sparsification import SparsificationInfo
def detect_framework(item: Any) -> Framework:
    """
    Detect the supported ML framework for a given item specifically for the
    pytorch package.

    Supported input types: a Framework enum; a framework-name string in any case
    (deepsparse, onnx, keras, pytorch, tensorflow_v1); a supported file type
    within the framework such as model files (onnx, pth, h5, pb); or an object
    from a supported ML framework such as a model instance.
    Returns Framework.unknown when the framework cannot be determined.

    :param item: The item to detect the ML framework for
    :type item: Any
    :return: The detected framework from the given item
    :rtype: Framework
    """
    if isinstance(item, Framework):
        _LOGGER.debug("framework detected from Framework instance")
        return item

    if isinstance(item, str):
        normalized = item.lower().strip()
        if normalized in Framework.__members__:
            _LOGGER.debug("framework detected from Framework string instance")
            return Framework[normalized]
        if "torch" in normalized:
            _LOGGER.debug("framework detected from torch text")
            return Framework.pytorch
        if ".pt" in normalized or ".mar" in normalized:
            _LOGGER.debug("framework detected from .pt or .mar")
            return Framework.pytorch
        # unmatched strings fall through to the Module check (mirrors the
        # original elif chain) and ultimately resolve to unknown

    if check_torch_install(raise_on_error=False):
        from torch.nn import Module

        if isinstance(item, Module):
            _LOGGER.debug("framework detected from pytorch instance")
            return Framework.pytorch

    return Framework.unknown
class Framework(Enum):
    """
    Framework types known of/supported within the sparseml/deepsparse ecosystem
    """

    # fallback value used when detection cannot identify a framework
    unknown = "unknown"
    deepsparse = "deepsparse"
    onnx = "onnx"
    keras = "keras"
    pytorch = "pytorch"
    tensorflow_v1 = "tensorflow_v1"
The provided code snippet includes necessary dependencies for implementing the `is_supported` function. Write a Python function `def is_supported(item: Any) -> bool` to solve the following problem:
:param item: The item to detect the support for :type item: Any :return: True if the item is supported by pytorch, False otherwise :rtype: bool
Here is the function:
def is_supported(item: Any) -> bool:
    """
    Check whether the given item is supported by the pytorch framework.

    :param item: The item to detect the support for
    :type item: Any
    :return: True if the item is supported by pytorch, False otherwise
    :rtype: bool
    """
    return detect_framework(item) == Framework.pytorch
21,260 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.pytorch.base import check_torch_install, torch
from sparseml.pytorch.sparsification import sparsification_info
from sparseml.sparsification import SparsificationInfo
class Framework(Enum):
    """
    Framework types known of/supported within the sparseml/deepsparse ecosystem
    """

    # fallback value used when detection cannot identify a framework
    unknown = "unknown"
    deepsparse = "deepsparse"
    onnx = "onnx"
    keras = "keras"
    pytorch = "pytorch"
    tensorflow_v1 = "tensorflow_v1"
def get_version(
    package_name: str,
    raise_on_error: bool,
    alternate_package_names: Optional[List[str]] = None,
) -> Optional[str]:
    """
    :param package_name: The name of the full package, as it would be imported,
        to get the version for
    :type package_name: str
    :param raise_on_error: True to raise an error if package is not installed
        or couldn't be imported, False to return None
    :type raise_on_error: bool
    :param alternate_package_names: List of alternate names to look for the package
        under if package_name is not found. Useful for nightly builds.
    :type alternate_package_names: Optional[List[str]]
    :return: the version of the desired package if detected, otherwise raises an error
    :rtype: str
    """
    current_version: Optional[str] = None
    version_err = None

    try:
        current_version = pkg_resources.get_distribution(package_name).version
    except Exception as err:
        version_err = err

    if version_err and alternate_package_names:
        # FIX: the previous implementation popped from the caller's list,
        # mutating the argument as a side effect; recurse on a slice instead,
        # trying alternates from the end of the list (same order as before).
        return get_version(
            alternate_package_names[-1],
            raise_on_error,
            alternate_package_names[:-1],
        )

    if version_err and raise_on_error:
        raise ImportError(
            f"error while getting current version for {package_name}: {version_err}"
        )

    return current_version if not version_err else None
# Import torch defensively: failures are captured rather than raised so that
# downstream helpers (e.g. check_torch_install) can report a useful error
# lazily instead of crashing at module import time.
try:
    import torch

    torch_err = None
except Exception as err:
    torch = object()  # TODO: populate with fake object for necessary imports
    torch_err = err
def check_torch_install(
    min_version: Optional[str] = _TORCH_MIN_VERSION,
    max_version: Optional[str] = _TORCH_MAX_VERSION,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the torch package is installed.
    If raise_on_error, will raise an ImportError if it is not installed or
    the required version range, if set, is not installed.
    If not raise_on_error, will return True if installed with required version
    and False otherwise.

    :param min_version: The minimum version for torch that it must be greater than
        or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for torch that it must be less than
        or equal to, if unset will require no maximum version.
    :type max_version: str
    :param raise_on_error: True to raise any issues such as not installed,
        minimum version, or maximum version as ImportError. False to return the result.
    :type raise_on_error: bool
    :return: If raise_on_error, will return False if torch is not installed
        or the version is outside the accepted bounds and True if everything is correct.
    :rtype: bool
    """
    if torch_err is not None:
        # torch failed to import at module load; re-raise that original error
        if raise_on_error:
            raise torch_err
        return False

    return check_version("torch", min_version, max_version, raise_on_error)
The provided code snippet includes necessary dependencies for implementing the `framework_info` function. Write a Python function `def framework_info() -> FrameworkInfo` to solve the following problem:
Detect the information for the onnx/onnxruntime framework such as package versions, availability for core actions such as training and inference, sparsification support, and inference provider support. :return: The framework info for onnx/onnxruntime :rtype: FrameworkInfo
Here is the function:
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the onnx/onnxruntime framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for onnx/onnxruntime
    :rtype: FrameworkInfo
    """
    # CPU provider is available whenever torch itself imports cleanly
    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description="Base CPU provider within PyTorch",
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=check_torch_install(raise_on_error=False),
        properties={},
        warnings=[],
    )
    # GPU provider additionally requires a visible CUDA device
    gpu_provider = FrameworkInferenceProviderInfo(
        name="cuda",
        description="Base GPU CUDA provider within PyTorch",
        device="gpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_torch_install(raise_on_error=False) and torch.cuda.is_available()
        ),
        properties={},
        warnings=[],
    )

    return FrameworkInfo(
        framework=Framework.pytorch,
        # version lookups are best-effort: None when a package is not installed
        package_versions={
            "torch": get_version(package_name="torch", raise_on_error=False),
            "torchvision": (
                get_version(package_name="torchvision", raise_on_error=False)
            ),
            "onnx": get_version(package_name="onnx", raise_on_error=False),
            "sparsezoo": get_version(
                package_name="sparsezoo",
                raise_on_error=False,
                alternate_package_names=["sparsezoo-nightly"],
            ),
            "sparseml": get_version(
                package_name="sparseml",
                raise_on_error=False,
                alternate_package_names=["sparseml-nightly"],
            ),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={},
        training_available=True,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
21,261 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300Lite, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300MobileNetBackbone(SSDBackbone):
    """
    Class to provide the feature extractor and define the additional conv layers
    for an SSD300 model for various MobileNet architecture backbones

    :param version: the MobileNet version to use for this backbone
    :param pretrained: True to load pretrained MobileNet weights; to load a specific
        version give a string with the name of the version (optim, optim-perf).
        Default is True
    :param pretrained_path: An optional model file path to load into the created model.
        Will override pretrained parameter
    """

    def __init__(
        self,
        version: Union[str, int] = "2",
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        version = int(version)
        assert int(version) in [1, 2]  # only MobileNet v1 and v2 are supported
        self._version = version
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        # NOTE(review): defined as a plain method here — confirm whether callers
        # expect a @property for this accessor
        if self._version == 1:
            return [1024, 512, 512, 256, 256, 256]
        else:
            # v2's first entry is a tuple: two feature maps (96 and 320 channels)
            return [(96, 320), 512, 512, 256, 256, 256]

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: MobileNet feature extractor module to be used for an SSD model
        """
        # Load MobileNet model
        model_key = "mobilenet-v{}".format(self._version)
        model = ModelRegistry.create(model_key, self._pretrained, self._pretrained_path)

        # increase feature map to 38x38: for v2, drop the stride on two
        # downsampling convs so the spatial resolution stays larger
        if self._version == 2:
            model.sections[3][0].spatial.conv.stride = (1, 1)
            model.sections[5][0].spatial.conv.stride = (1, 1)

        feature_blocks = list(model.sections.children())
        return nn.Sequential(*feature_blocks)
key=["ssd300lite_mobilenetv2", "ssdlite_mobilenetv2"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd_lite",
sub_architecture="mobilenet_v2",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300lite_mobilenetv2` function. Write a Python function `def ssd300lite_mobilenetv2( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300Lite` to solve the following problem:
SSD 300 Lite with MobileNet V2 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained MobileNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD Lite MobileNet model
Here is the function:
def ssd300lite_mobilenetv2(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300Lite:
    """
    Build an SSD 300 Lite detection model with a MobileNet V2 feature extractor;
    expected input shape is (B, 3, 300, 300).

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained MobileNet weights; to load a
        specific version give a string with the name of the version (optim, optim-perf).
        Default is True
    :param pretrained_path_backbone: An optional model file path to load into the
        created model's backbone
    :return: the created SSD Lite MobileNet model
    """
    backbone = SSD300MobileNetBackbone(
        "2", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300Lite(backbone, 4, num_classes)
21,262 | import math
from typing import List, Tuple, Union
from torch import Tensor, cat
from torch.nn import (
BatchNorm2d,
Conv2d,
MaxPool2d,
Module,
ModuleList,
Parameter,
Upsample,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
def _init_conv(conv: Conv2d):
    # He (Kaiming) normal initialization for the conv weight; fan_out mode with
    # relu nonlinearity per the torch.nn.init convention for conv layers
    init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu")
21,263 | import math
from typing import List, Tuple, Union
from torch import Tensor, cat
from torch.nn import (
BatchNorm2d,
Conv2d,
MaxPool2d,
Module,
ModuleList,
Parameter,
Upsample,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,264 | import math
from typing import List, Tuple, Union
from torch import Tensor, cat
from torch.nn import (
BatchNorm2d,
Conv2d,
MaxPool2d,
Module,
ModuleList,
Parameter,
Upsample,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
class YoloV3(Module):
    """
    Yolo v3 implementation matching the standard Yolo v3 SPP configuration.

    The head alternates feature-processing conv blocks (the first containing
    an SPP layer) with upsample blocks; each conv block additionally feeds one
    of the three detection heads.

    :param num_classes: the number of classes to classify objects with
    :param backbone: CNN backbone to this model; must return a list of three
        feature maps indexed 0..2 in the order the head consumes them
    :param backbone_out_channels: the number of output channels in each of the
        backbone's three outputs, in the same order the backbone returns them
    :param anchor_groups: list of 3x2 Tensors of anchor point coordinates for
        each of this model's detectors
    """

    def __init__(
        self,
        num_classes: int,
        backbone: Module,
        backbone_out_channels: List[int],
        anchor_groups: List[Tensor],
    ):
        super().__init__()
        self.num_classes = num_classes
        self.backbone = backbone
        assert len(backbone_out_channels) == 3 # only 3 supported for now
        # The head alternates conv blocks with upsample blocks; each conv
        # block after the first consumes the previous upsampled output
        # concatenated with the next backbone feature map.
        self.head = ModuleList(
            [
                _YoloConvsBlock(
                    in_channels=backbone_out_channels[0],
                    out_channels_k1=512,
                    out_channels_k3=1024,
                    total_layers=8,
                    spp_layer_idxs=[3], # replace third conv with SPP
                ),
                _UpsampleBlock(in_channels=512, out_channels=256, scale_factor=2),
                _YoloConvsBlock(
                    in_channels=256 + backbone_out_channels[1],
                    out_channels_k1=256,
                    out_channels_k3=512,
                    total_layers=6,
                ),
                _UpsampleBlock(in_channels=256, out_channels=128, scale_factor=2),
                _YoloConvsBlock(
                    in_channels=128 + backbone_out_channels[2],
                    out_channels_k1=128,
                    out_channels_k3=256,
                    total_layers=6,
                ),
            ]
        )
        # One detection head per conv block, matched by channel count.
        self.detector = ModuleList(
            [
                _YoloDetectionBlock(1024, num_classes, 3),
                _YoloDetectionBlock(512, num_classes, 3),
                _YoloDetectionBlock(256, num_classes, 3),
            ]
        )
        self.anchor_groups = anchor_groups

    def forward(self, inp: Tensor):
        """
        Run detection on a batch of images.

        :param inp: input image batch; presumably shape (B, 3, H, W) —
            confirm against the registered input shape
        :return: list of the three detector outputs, one per conv block
        """
        backbone_features = self.backbone(inp)
        # run yolo head
        backbone_feature_idx = 0 # track backbone output to use
        current_tens = None # holds the latest output to be propagated through model
        detector_inputs = []
        for section in self.head:
            if isinstance(section, _YoloConvsBlock):
                # Conv blocks consume a backbone feature map: the first takes
                # it directly, later ones concatenate it channel-wise (dim 1)
                # with the upsampled output of the previous section.
                section_input = (
                    backbone_features[backbone_feature_idx]
                    if current_tens is None
                    else cat([current_tens, backbone_features[backbone_feature_idx]], 1)
                )
                # Each conv block returns a pair:
                # (input for its detector, tensor to keep propagating).
                detector_input, current_tens = section(section_input)
                detector_inputs.append(detector_input)
                backbone_feature_idx += 1
            else:
                # upsample block: only transforms the running tensor
                current_tens = section(current_tens)
        # run detector layers
        outputs = []
        for detector_input, detector_layer in zip(detector_inputs, self.detector):
            outputs.append(detector_layer(detector_input))
        return outputs
key=["yolo", "yolo-v3", "yolo_v3"],
input_shape=(3, 640, 640),
domain="cv",
sub_domain="detection",
architecture="yolo_v3",
sub_architecture="spp",
default_dataset="coco",
default_desc="base",
class ModelRegistry(object):
    """
    Registry class for creating models.

    Constructors are registered (normally via the :meth:`register` decorator)
    under one or more string keys, together with a ``_ModelAttributes`` record
    describing the model; :meth:`create` then instantiates a model by key and
    optionally loads pretrained weights.

    NOTE(review): the methods below take no ``self``/``cls`` and are invoked
    as ``ModelRegistry.method(...)``; they look like ``@staticmethod``s whose
    decorators were stripped in this copy — confirm against upstream.
    """

    # key -> wrapped constructor callable
    _CONSTRUCTORS = {} # type: Dict[str, Callable]
    # key -> metadata record for the registered model
    _ATTRIBUTES = {} # type: Dict[str, _ModelAttributes]

    def available_keys() -> List[str]:
        """
        :return: the keys (models) currently available in the registry
        """
        return list(ModelRegistry._CONSTRUCTORS.keys())

    def create(
        key: Optional[str] = None,
        pretrained: Union[bool, str] = False,
        pretrained_path: str = None,
        pretrained_dataset: str = None,
        load_strict: bool = True,
        ignore_error_tensors: List[str] = None,
        **kwargs,
    ) -> Union[Module, Tuple[Module, Optional[str]]]:
        """
        Create a new model for the given key.

        :param key: the model key (name) to create. If None, the key is read
            from the checkpoint's ``arch_key`` entry at ``pretrained_path``
        :param pretrained: True to load pretrained weights; to load a specific
            version give a string with the name of the version
            (pruned-moderate, base). Default False
        :param pretrained_path: a model file path to load into the created
            model; may be a SparseZoo stub prefixed with ``zoo:``
        :param pretrained_dataset: the dataset to load for the model
        :param load_strict: True to make sure all states are found in and
            loaded in model, False otherwise; default True
        :param ignore_error_tensors: tensors to ignore if there are errors in
            loading
        :param kwargs: any keyword args to supply to the model constructor
        :return: the instantiated model if key is given, else a tuple of
            (instantiated model, key read from the checkpoint)
        :raises ValueError: if neither key nor pretrained_path is given, if
            the checkpoint has no ``arch_key``, or if the key is unregistered
        """
        key_copy = key
        if key_copy is None:
            if pretrained_path is None:
                raise ValueError("Must provide a key or a pretrained_path")
            # resolve a SparseZoo stub to a local file before loading
            if pretrained_path.startswith("zoo:"):
                pretrained_path = download_framework_model_by_recipe_type(
                    Model(pretrained_path)
                )
            _checkpoint = torch.load(pretrained_path)
            if "arch_key" in _checkpoint:
                key_copy = _checkpoint["arch_key"]
            else:
                raise ValueError("No `arch_key` found in checkpoint")
        if key_copy not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key_copy, ModelRegistry._CONSTRUCTORS
                )
            )
        model = ModelRegistry._CONSTRUCTORS[key_copy](
            pretrained=pretrained,
            pretrained_path=pretrained_path,
            pretrained_dataset=pretrained_dataset,
            load_strict=load_strict,
            ignore_error_tensors=ignore_error_tensors,
            **kwargs,
        )
        # return the resolved key only when the caller did not supply one
        return (model, key_copy) if key is None else model

    def create_zoo_model(
        key: str,
        pretrained: Union[bool, str] = True,
        pretrained_dataset: str = None,
    ) -> Model:
        """
        Create a sparsezoo Model for the desired model in the zoo.

        :param key: the model key (name) to retrieve
        :param pretrained: True to load pretrained weights; to load a specific
            version give a string with the name of the version
            (optim, optim-perf), default True
        :param pretrained_dataset: the dataset to load for the model
        :return: the sparsezoo Model reference for the given model
        :raises ValueError: if the key is not registered
        """
        if key not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key, ModelRegistry._CONSTRUCTORS
                )
            )
        attributes = ModelRegistry._ATTRIBUTES[key]
        # a string `pretrained` encodes the optimization variant; otherwise
        # fall back to the model's registered default description
        sparse_name, sparse_category, sparse_target = parse_optimization_str(
            pretrained if isinstance(pretrained, str) else attributes.default_desc
        )
        model_dict = {
            "domain": attributes.domain,
            "sub_domain": attributes.sub_domain,
            "architecture": attributes.architecture,
            "sub_architecture": attributes.sub_architecture,
            "framework": PYTORCH_FRAMEWORK,
            "repo": attributes.repo_source,
            "dataset": attributes.default_dataset
            if pretrained_dataset is None
            else pretrained_dataset,
            "sparse_tag": f"{sparse_name}-{sparse_category}",
        }
        stub = model_args_to_stub(**model_dict)
        return Model(stub)

    def input_shape(key: str) -> Any:
        """
        :param key: the model key (name) to look up
        :return: the registered input shape for the model
        :raises ValueError: if the key is not registered
        """
        if key not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key, ModelRegistry._CONSTRUCTORS
                )
            )
        return ModelRegistry._ATTRIBUTES[key].input_shape

    def register(
        key: Union[str, List[str]],
        input_shape: Any,
        domain: str,
        sub_domain: str,
        architecture: str,
        sub_architecture: str,
        default_dataset: str,
        default_desc: str,
        repo_source: str = "sparseml",
        def_ignore_error_tensors: List[str] = None,
        desc_args: Dict[str, Tuple[str, Any]] = None,
    ):
        """
        Register a model with the registry. Should be used as a decorator
        on a model constructor function.

        :param key: the model key (name) to create; may be a list of aliases
        :param input_shape: the specified input shape for the model
        :param domain: the domain the model belongs to; ex: cv, nlp, etc
        :param sub_domain: the sub domain the model belongs to;
            ex: classification, detection, etc
        :param architecture: the architecture the model belongs to;
            ex: resnet, mobilenet, etc
        :param sub_architecture: the sub architecture the model belongs to;
            ex: 50, 101, etc
        :param default_dataset: the dataset to use by default for loading
            pretrained if not supplied
        :param default_desc: the description to use by default for loading
            pretrained if not supplied
        :param repo_source: the source repo for the model, default is sparseml
        :param def_ignore_error_tensors: tensors to ignore if there are
            errors in loading
        :param desc_args: args that should be changed based on the description
        :return: the decorator
        """
        if not isinstance(key, List):
            key = [key]

        def decorator(constructor_func):
            # the wrapper is built once against the primary key; aliases all
            # point at the same wrapped constructor
            wrapped_constructor = ModelRegistry._registered_wrapper(
                key[0],
                constructor_func,
            )
            ModelRegistry.register_wrapped_model_constructor(
                wrapped_constructor,
                key,
                input_shape,
                domain,
                sub_domain,
                architecture,
                sub_architecture,
                default_dataset,
                default_desc,
                repo_source,
                def_ignore_error_tensors,
                desc_args,
            )
            return wrapped_constructor

        return decorator

    def register_wrapped_model_constructor(
        wrapped_constructor: Callable,
        key: Union[str, List[str]],
        input_shape: Any,
        domain: str,
        sub_domain: str,
        architecture: str,
        sub_architecture: str,
        default_dataset: str,
        default_desc: str,
        repo_source: str,
        def_ignore_error_tensors: List[str] = None,
        desc_args: Dict[str, Tuple[str, Any]] = None,
    ):
        """
        Register a model with the registry from an already-wrapped constructor.

        :param wrapped_constructor: model constructor wrapped to be compatible
            by call from ModelRegistry.create; should have pretrained,
            pretrained_path, pretrained_dataset, load_strict,
            ignore_error_tensors, and kwargs as arguments
        :param key: the model key (name) to create; may be a list of aliases
        :param input_shape: the specified input shape for the model
        :param domain: the domain the model belongs to; ex: cv, nlp, etc
        :param sub_domain: the sub domain the model belongs to;
            ex: classification, detection, etc
        :param architecture: the architecture the model belongs to;
            ex: resnet, mobilenet, etc
        :param sub_architecture: the sub architecture the model belongs to;
            ex: 50, 101, etc
        :param default_dataset: the dataset to use by default for loading
            pretrained if not supplied
        :param default_desc: the description to use by default for loading
            pretrained if not supplied
        :param repo_source: the source repo for the model;
            ex: sparseml, torchvision
        :param def_ignore_error_tensors: tensors to ignore if there are
            errors in loading
        :param desc_args: args that should be changed based on the description
        :raises ValueError: if any of the keys is already registered
        """
        if not isinstance(key, List):
            key = [key]
        for r_key in key:
            # NOTE(review): the error message reports the full key list, not
            # the single duplicate r_key; earlier aliases in the list stay
            # registered when a later one raises
            if r_key in ModelRegistry._CONSTRUCTORS:
                raise ValueError("key {} is already registered".format(key))
            ModelRegistry._CONSTRUCTORS[r_key] = wrapped_constructor
            ModelRegistry._ATTRIBUTES[r_key] = _ModelAttributes(
                input_shape,
                domain,
                sub_domain,
                architecture,
                sub_architecture,
                default_dataset,
                default_desc,
                repo_source,
                def_ignore_error_tensors,
                desc_args,
            )

    def _registered_wrapper(
        key: str,
        constructor_func: Callable,
    ):
        # Build the uniform constructor wrapper stored in _CONSTRUCTORS;
        # `key` is closed over so the wrapper can look up its attributes.
        def wrapper(
            pretrained_path: str = None,
            pretrained: Union[bool, str] = False,
            pretrained_dataset: str = None,
            load_strict: bool = True,
            ignore_error_tensors: List[str] = None,
            *args,
            **kwargs,
        ):
            """
            :param pretrained_path: A path to the pretrained weights to load,
                if provided will override the pretrained param. May also be
                a SparseZoo stub path preceded by 'zoo:' with the optional
                `?recipe_type=` argument. If given a recipe type, the base
                model weights for that recipe will be loaded
            :param pretrained: True to load the default pretrained weights,
                a string to load a specific pretrained weight
                (ex: base, optim, optim-perf),
                or False to not load any pretrained weights
            :param pretrained_dataset: The dataset to load pretrained weights
                for (ex: imagenet, mnist, etc).
                If not supplied will default to the one preconfigured for the
                model.
            :param load_strict: True to raise an error on issues with state
                dict loading from pretrained_path or pretrained, False to
                ignore
            :param ignore_error_tensors: Tensors to ignore while checking the
                state dict for weights loaded from pretrained_path or
                pretrained
            """
            attributes = ModelRegistry._ATTRIBUTES[key]
            # apply any description-specific constructor overrides
            if attributes.args and pretrained in attributes.args:
                kwargs[attributes.args[pretrained][0]] = attributes.args[pretrained][1]
            model = constructor_func(*args, **kwargs)
            ignore = []
            # explicit caller list takes precedence over the registered one
            if ignore_error_tensors:
                ignore.extend(ignore_error_tensors)
            elif attributes.ignore_error_tensors:
                ignore.extend(attributes.ignore_error_tensors)
            # normalize string booleans ("true"/"false"/"none") from configs
            if isinstance(pretrained, str):
                if pretrained.lower() == "true":
                    pretrained = True
                elif pretrained.lower() in ["false", "none"]:
                    pretrained = False
            if pretrained_path:
                load_model(pretrained_path, model, load_strict, ignore)
            elif pretrained:
                zoo_model = ModelRegistry.create_zoo_model(
                    key, pretrained, pretrained_dataset
                )
                try:
                    path = download_framework_model_by_recipe_type(zoo_model)
                    load_model(path, model, load_strict, ignore)
                except Exception:
                    # try one more time with overwrite on in case file was corrupted
                    path = download_framework_model_by_recipe_type(zoo_model)
                    load_model(path, model, load_strict, ignore)
            return model

        return wrapper
The provided code snippet includes necessary dependencies for implementing the `yolo_v3` function. Write a Python function `def yolo_v3( num_classes: int = 80, pretrained_backbone: Union[bool, str] = False, pretrained_path_backbone: str = None, ) -> YoloV3` to solve the following problem:
Yolo-V3 model with standard DarkNet-53 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained DarkNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is False :param pretrained_path_backbone: An optional model file path to load into the DarkNet backbone. Default is None :return: the created Yolo model
Here is the function:
def yolo_v3(
    num_classes: int = 80,
    pretrained_backbone: Union[bool, str] = False,
    pretrained_path_backbone: str = None,
) -> YoloV3:
    """
    Yolo-V3 SPP model with standard DarkNet-53 backbone;
    expected input shape is (B, 3, 300, 300)

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained DarkNet weights; to
        load a specific version give a string with the name of the version
        (optim, optim-perf). Default is False
    :param pretrained_path_backbone: An optional model file path to load into
        the DarkNet backbone. Default is None
    :return: the created Yolo model
    """
    backbone = ModelRegistry.create(
        "darknet53", pretrained_backbone, pretrained_path_backbone
    )
    backbone.as_yolo_backbone([4, 3, 2])  # set outputs to last 3 residual layers
    del backbone.classifier  # remove fc layer from state dict
    backbone_out_channels = [1024, 512, 256]
    # standard YOLOv3 anchor (w, h) sizes, one group of 3 per detection scale,
    # ordered to match the backbone outputs above
    anchor_groups = [
        Tensor([[116, 90], [156, 198], [373, 326]]),
        Tensor([[30, 61], [62, 45], [59, 119]]),
        Tensor([[10, 13], [16, 30], [33, 23]]),
    ]
    return YoloV3(num_classes, backbone, backbone_out_channels, anchor_groups)
21,265 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300ResNetBackbone(SSDBackbone):
    """
    Provides the feature extractor and the additional conv-layer channel
    configuration for an SSD300 model built on a ResNet backbone.

    :param sub_arch: the ResNet sub architecture to use for this backbone;
        one of 18, 34, 50, 101, 152 (int or str)
    :param pretrained: True to load pretrained ResNet weights; to load a
        specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path: An optional model file path to load into the
        created model. Will override pretrained parameter
    """

    def __init__(
        self,
        sub_arch: Union[str, int],
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        sub_arch = str(sub_arch)
        valid_sub_archs = ["18", "34", "50", "101", "152"]
        if sub_arch not in valid_sub_archs:
            raise ValueError(
                "Invalid ResNet sub architecture {}. Valid sub architectures: {}".format(
                    sub_arch, valid_sub_archs
                )
            )
        self._sub_arch = sub_arch
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        channels_by_arch = {
            "18": [256, 512, 512, 256, 256, 128],
            "34": [256, 512, 512, 256, 256, 256],
        }
        # "50", "101", and "152" share the same channel configuration
        return channels_by_arch.get(self._sub_arch, [1024, 512, 512, 256, 256, 256])

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: ResNet feature extractor module to be used for an SSD model
        """
        model = ModelRegistry.create(
            "resnet{}".format(self._sub_arch), self._pretrained, self._pretrained_path
        )
        input_layer, blocks, _ = model.children()
        # only the first 3 ResNet sections are used as SSD features
        feature_blocks = list(blocks.children())[:3]
        extractor = nn.Sequential(input_layer, *feature_blocks)
        # set strides of the last section's first block (including its
        # identity/downsample conv) to 1
        first_block = extractor[-1][0]
        first_block.conv1.stride = (1, 1)
        first_block.conv2.stride = (1, 1)
        first_block.identity.conv.stride = (1, 1)
        return extractor
key=["ssd300_resnet18", "ssd_resnet18"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd",
sub_architecture="resnet18_300",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300_resnet18` function. Write a Python function `def ssd300_resnet18( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300` to solve the following problem:
SSD 300 with ResNet 18 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained ResNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD ResNet model
Here is the function:
def ssd300_resnet18(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300:
    """
    Build an SSD 300 detector with a ResNet-18 backbone; the expected input
    shape is (B, 3, 300, 300).

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained ResNet weights; to
        load a specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path_backbone: An optional model file path to load into
        the created model's backbone
    :return: the created SSD ResNet model
    """
    backbone = SSD300ResNetBackbone(
        "18", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300(backbone, num_classes)
21,266 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300ResNetBackbone(SSDBackbone):
    """
    Provides the feature extractor and the additional conv-layer channel
    configuration for an SSD300 model built on a ResNet backbone.

    :param sub_arch: the ResNet sub architecture to use for this backbone;
        one of 18, 34, 50, 101, 152 (int or str)
    :param pretrained: True to load pretrained ResNet weights; to load a
        specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path: An optional model file path to load into the
        created model. Will override pretrained parameter
    """

    def __init__(
        self,
        sub_arch: Union[str, int],
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        sub_arch = str(sub_arch)
        valid_sub_archs = ["18", "34", "50", "101", "152"]
        if sub_arch not in valid_sub_archs:
            raise ValueError(
                "Invalid ResNet sub architecture {}. Valid sub architectures: {}".format(
                    sub_arch, valid_sub_archs
                )
            )
        self._sub_arch = sub_arch
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        channels_by_arch = {
            "18": [256, 512, 512, 256, 256, 128],
            "34": [256, 512, 512, 256, 256, 256],
        }
        # "50", "101", and "152" share the same channel configuration
        return channels_by_arch.get(self._sub_arch, [1024, 512, 512, 256, 256, 256])

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: ResNet feature extractor module to be used for an SSD model
        """
        model = ModelRegistry.create(
            "resnet{}".format(self._sub_arch), self._pretrained, self._pretrained_path
        )
        input_layer, blocks, _ = model.children()
        # only the first 3 ResNet sections are used as SSD features
        feature_blocks = list(blocks.children())[:3]
        extractor = nn.Sequential(input_layer, *feature_blocks)
        # set strides of the last section's first block (including its
        # identity/downsample conv) to 1
        first_block = extractor[-1][0]
        first_block.conv1.stride = (1, 1)
        first_block.conv2.stride = (1, 1)
        first_block.identity.conv.stride = (1, 1)
        return extractor
key=["ssd300_resnet18", "ssd_resnet18"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd",
sub_architecture="resnet18_300",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300_resnet34` function. Write a Python function `def ssd300_resnet34( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300` to solve the following problem:
SSD 300 with ResNet 34 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained ResNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD ResNet model
Here is the function:
def ssd300_resnet34(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300:
    """
    Build an SSD 300 detector with a ResNet-34 backbone; the expected input
    shape is (B, 3, 300, 300).

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained ResNet weights; to
        load a specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path_backbone: An optional model file path to load into
        the created model's backbone
    :return: the created SSD ResNet model
    """
    backbone = SSD300ResNetBackbone(
        "34", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300(backbone, num_classes)
21,267 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300ResNetBackbone(SSDBackbone):
    """
    Provides the feature extractor and the additional conv-layer channel
    configuration for an SSD300 model built on a ResNet backbone.

    :param sub_arch: the ResNet sub architecture to use for this backbone;
        one of 18, 34, 50, 101, 152 (int or str)
    :param pretrained: True to load pretrained ResNet weights; to load a
        specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path: An optional model file path to load into the
        created model. Will override pretrained parameter
    """

    def __init__(
        self,
        sub_arch: Union[str, int],
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        sub_arch = str(sub_arch)
        valid_sub_archs = ["18", "34", "50", "101", "152"]
        if sub_arch not in valid_sub_archs:
            raise ValueError(
                "Invalid ResNet sub architecture {}. Valid sub architectures: {}".format(
                    sub_arch, valid_sub_archs
                )
            )
        self._sub_arch = sub_arch
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        channels_by_arch = {
            "18": [256, 512, 512, 256, 256, 128],
            "34": [256, 512, 512, 256, 256, 256],
        }
        # "50", "101", and "152" share the same channel configuration
        return channels_by_arch.get(self._sub_arch, [1024, 512, 512, 256, 256, 256])

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: ResNet feature extractor module to be used for an SSD model
        """
        model = ModelRegistry.create(
            "resnet{}".format(self._sub_arch), self._pretrained, self._pretrained_path
        )
        input_layer, blocks, _ = model.children()
        # only the first 3 ResNet sections are used as SSD features
        feature_blocks = list(blocks.children())[:3]
        extractor = nn.Sequential(input_layer, *feature_blocks)
        # set strides of the last section's first block (including its
        # identity/downsample conv) to 1
        first_block = extractor[-1][0]
        first_block.conv1.stride = (1, 1)
        first_block.conv2.stride = (1, 1)
        first_block.identity.conv.stride = (1, 1)
        return extractor
key=["ssd300_resnet18", "ssd_resnet18"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd",
sub_architecture="resnet18_300",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300_resnet50` function. Write a Python function `def ssd300_resnet50( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300` to solve the following problem:
SSD 300 with ResNet 50 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained ResNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD ResNet model
Here is the function:
def ssd300_resnet50(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300:
    """
    Build an SSD 300 detector with a ResNet-50 backbone; the expected input
    shape is (B, 3, 300, 300).

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained ResNet weights; to
        load a specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path_backbone: An optional model file path to load into
        the created model's backbone
    :return: the created SSD ResNet model
    """
    backbone = SSD300ResNetBackbone(
        "50", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300(backbone, num_classes)
21,268 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300ResNetBackbone(SSDBackbone):
    """
    Provides the feature extractor and the additional conv-layer channel
    configuration for an SSD300 model built on a ResNet backbone.

    :param sub_arch: the ResNet sub architecture to use for this backbone;
        one of 18, 34, 50, 101, 152 (int or str)
    :param pretrained: True to load pretrained ResNet weights; to load a
        specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path: An optional model file path to load into the
        created model. Will override pretrained parameter
    """

    def __init__(
        self,
        sub_arch: Union[str, int],
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        sub_arch = str(sub_arch)
        valid_sub_archs = ["18", "34", "50", "101", "152"]
        if sub_arch not in valid_sub_archs:
            raise ValueError(
                "Invalid ResNet sub architecture {}. Valid sub architectures: {}".format(
                    sub_arch, valid_sub_archs
                )
            )
        self._sub_arch = sub_arch
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        channels_by_arch = {
            "18": [256, 512, 512, 256, 256, 128],
            "34": [256, 512, 512, 256, 256, 256],
        }
        # "50", "101", and "152" share the same channel configuration
        return channels_by_arch.get(self._sub_arch, [1024, 512, 512, 256, 256, 256])

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: ResNet feature extractor module to be used for an SSD model
        """
        model = ModelRegistry.create(
            "resnet{}".format(self._sub_arch), self._pretrained, self._pretrained_path
        )
        input_layer, blocks, _ = model.children()
        # only the first 3 ResNet sections are used as SSD features
        feature_blocks = list(blocks.children())[:3]
        extractor = nn.Sequential(input_layer, *feature_blocks)
        # set strides of the last section's first block (including its
        # identity/downsample conv) to 1
        first_block = extractor[-1][0]
        first_block.conv1.stride = (1, 1)
        first_block.conv2.stride = (1, 1)
        first_block.identity.conv.stride = (1, 1)
        return extractor
key=["ssd300_resnet18", "ssd_resnet18"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd",
sub_architecture="resnet18_300",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300_resnet101` function. Write a Python function `def ssd300_resnet101( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300` to solve the following problem:
SSD 300 with ResNet 101 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained ResNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD ResNet model
Here is the function:
def ssd300_resnet101(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300:
    """
    Build an SSD 300 detector with a ResNet-101 backbone; the expected input
    shape is (B, 3, 300, 300).

    :param num_classes: the number of classes of objects to classify
    :param pretrained_backbone: True to load pretrained ResNet weights; to
        load a specific version give a string with the name of the version
        (optim, optim-perf). Default is True
    :param pretrained_path_backbone: An optional model file path to load into
        the created model's backbone
    :return: the created SSD ResNet model
    """
    backbone = SSD300ResNetBackbone(
        "101", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300(backbone, num_classes)
21,269 | from typing import List, Union
from torch import nn
from sparseml.pytorch.models.detection import SSD300, SSDBackbone
from sparseml.pytorch.models.registry import ModelRegistry
class SSD300ResNetBackbone(SSDBackbone):
    """
    Class to provide the feature extractor and define the additional conv layers
    for an SSD300 model for various ResNet sub architecture backbones

    :param sub_arch: the ResNet sub architecture to use for this backbone
        (accepts an int such as 50 or a string such as "50")
    :param pretrained: True to load pretrained ResNet weights; to load a specific
        version give a string with the name of the version (optim, optim-perf).
        Default is True
    :param pretrained_path: An optional model file path to load into the created model.
        Will override pretrained parameter
    """

    def __init__(
        self,
        sub_arch: Union[str, int],
        pretrained: Union[bool, str] = True,
        pretrained_path: str = None,
    ):
        # normalize to string so int inputs like 50 are accepted
        sub_arch = str(sub_arch)
        sub_architectures = ["18", "34", "50", "101", "152"]
        if sub_arch not in sub_architectures:
            raise ValueError(
                (
                    "Invalid ResNet sub architecture {}." " Valid sub architectures: {}"
                ).format(sub_arch, sub_architectures)
            )
        self._sub_arch = sub_arch
        self._pretrained = pretrained
        self._pretrained_path = pretrained_path

    # NOTE(review): in the upstream source this is likely declared with
    # @property (decorators appear stripped by extraction) — confirm before
    # relying on attribute-style access.
    def out_channels(self) -> List[int]:
        """
        :return: The number of output channels that should be used for the
            additional conv layers with this backbone
        """
        if self._sub_arch == "18":
            return [256, 512, 512, 256, 256, 128]
        elif self._sub_arch == "34":
            return [256, 512, 512, 256, 256, 256]
        else:  # "50", "101", "152" — bottleneck variants share one layout
            return [1024, 512, 512, 256, 256, 256]

    def get_feature_extractor(self) -> nn.Module:
        """
        :return: ResNet feature extractor module to be used for an SSD model
        """
        # Load ResNet model from the sparseml registry
        model_key = "resnet{}".format(self._sub_arch)
        model = ModelRegistry.create(model_key, self._pretrained, self._pretrained_path)
        # expects exactly three top-level children: input layer, the block
        # sections, and a classifier head (discarded here)
        input_layer, blocks, _ = model.children()
        feature_blocks = list(blocks.children())[:3]  # take first 3 ResNet blocks
        feature_extractor = nn.Sequential(input_layer, *feature_blocks)

        # set last section first block strides to 1 so the final feature map
        # is not downsampled again
        last_section_first_block = feature_extractor[-1][0]
        last_section_first_block.conv1.stride = (1, 1)
        last_section_first_block.conv2.stride = (1, 1)
        # NOTE(review): assumes the block's downsample path is exposed as
        # `identity.conv` — true for sparseml's ResNet implementation; verify
        # if the backbone source changes
        last_section_first_block.identity.conv.stride = (1, 1)

        return feature_extractor
key=["ssd300_resnet18", "ssd_resnet18"],
input_shape=(3, 300, 300),
domain="cv",
sub_domain="detection",
architecture="ssd",
sub_architecture="resnet18_300",
default_dataset="coco",
default_desc="base",
The provided code snippet includes necessary dependencies for implementing the `ssd300_resnet152` function. Write a Python function `def ssd300_resnet152( num_classes: int = 91, pretrained_backbone: Union[bool, str] = True, pretrained_path_backbone: str = None, ) -> SSD300` to solve the following problem:
SSD 300 with ResNet 152 backbone; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes of objects to classify :param pretrained_backbone: True to load pretrained ResNet weights; to load a specific version give a string with the name of the version (optim, optim-perf). Default is True :param pretrained_path_backbone: An optional model file path to load into the created model's backbone :return: the created SSD ResNet model
Here is the function:
def ssd300_resnet152(
    num_classes: int = 91,
    pretrained_backbone: Union[bool, str] = True,
    pretrained_path_backbone: str = None,
) -> SSD300:
    """
    Build an SSD300 detector on top of a ResNet-152 feature extractor.

    Expected input shape: (B, 3, 300, 300).

    :param num_classes: the number of object classes to predict
    :param pretrained_backbone: True to load the default pretrained ResNet
        weights, or a version string (e.g. optim, optim-perf) for a specific
        checkpoint. Default is True
    :param pretrained_path_backbone: optional weights file path loaded into
        the created model's backbone
    :return: the assembled SSD300 model
    """
    backbone = SSD300ResNetBackbone(
        "152", pretrained_backbone, pretrained_path_backbone
    )
    return SSD300(backbone, num_classes)
21,270 | from inspect import getmembers, isfunction, signature
from typing import List, Union
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.utils import load_model
def _registry_constructor_wrapper(key, constructor_function):
    # wraps the torchvision model constructor function to be compatible with sparseml
    # model registry loading (same call signature as registry-native constructors)
    def wrapper(
        pretrained_path: str = None,
        pretrained: Union[bool, str] = False,
        pretrained_dataset: str = None,
        load_strict: bool = True,
        ignore_error_tensors: List[str] = None,
        **kwargs,
    ):
        """
        :param pretrained_path: A path to the pretrained weights to load,
            if provided will override the pretrained param. May also be
            a SparseZoo stub path preceded by 'zoo:' with the optional
            `?recipe_type=` argument. If given a recipe type, the base
            model weights for that recipe will be loaded
        :param pretrained: True to load the default pretrained weights,
            a string to load a specific pretrained weight
            (ex: base, pruned-moderate),
            or False to not load any pretrained weights
        :param pretrained_dataset: The dataset to load pretrained weights for
            (ex: imagenet, mnist, etc).
            If not supplied will default to the one preconfigured for the model.
        :param load_strict: True to raise an error on issues with state dict
            loading from pretrained_path or pretrained, False to ignore
        :param ignore_error_tensors: Tensors to ignore while checking the state dict
            for weights loaded from pretrained_path or pretrained
        """
        # normalize string flags (e.g. from CLI/config files) to real booleans
        if isinstance(pretrained, str):
            if pretrained.lower() == "true":
                pretrained = True
            elif pretrained.lower() in ["false", "none"]:
                pretrained = False

        # let torchvision download its own weights only when pretrained is the
        # plain boolean True and no explicit path overrides it
        pretrained_torchvision = pretrained is True and not pretrained_path
        model = constructor_function(pretrained=pretrained_torchvision, **kwargs)

        ignore_error_tensors = ignore_error_tensors or []
        if pretrained_path:
            load_model(pretrained_path, model, load_strict, ignore_error_tensors)
        elif pretrained and not pretrained_torchvision:
            # a named checkpoint (string pretrained value) is fetched from SparseZoo
            zoo_model = ModelRegistry.create_zoo_model(
                key, pretrained, pretrained_dataset
            )
            try:
                paths = zoo_model.download_framework_files(extensions=[".pth"])
                load_model(paths[0], model, load_strict, ignore_error_tensors)
            except Exception:
                # try one more time with overwrite on in case file was corrupted
                paths = zoo_model.download_framework_files(
                    overwrite=True, extensions=[".pth"]
                )
                load_model(paths[0], model, load_strict, ignore_error_tensors)

        return model

    return wrapper
def _get_architecture(model_name):
if "_v2_x" in model_name: # shuffle net
arch, sub_arch = model_name.split("_v2_x")
sub_arch = ".".join(sub_arch.split("_")) + "x"
return arch + "_v2", sub_arch
if model_name == "googlenet":
return "inception_v1", "googlenet"
if model_name in ["alexnet", "inception_v3", "mobilenet_v2"]:
return model_name, "none"
if "mnasnet" in model_name or "squeezenet" in model_name:
arch = "mnasnet" if "mnasnet" in model_name else "squeezenet"
sub_arch = model_name.split(arch)[-1]
sub_arch = ".".join(sub_arch.split("_")) if sub_arch != "1_0" else "none"
return arch, sub_arch
if model_name.startswith("resnext"):
model_name = model_name.split("_")[0]
sub_arch = model_name.split("resnext")[-1]
return "resnext", sub_arch
if model_name.startswith("wide_"):
model_name = model_name.split("wide_")[-1]
model_name += "xwidth" # ie 2xwidth
# split model name by location of first digit (ie resnet50, vgg11)
for digit_idx, char in enumerate(model_name):
if char.isdigit():
break
if digit_idx == len(model_name) - 1 and not model_name[-1].isdigit():
arch = model_name
sub_arch = "none"
else:
arch = model_name[0:digit_idx]
sub_arch = model_name[digit_idx:]
if arch == "resnet":
arch += "_v1"
return arch, sub_arch
# Mirror torchvision's classification constructors into the registry only when
# torchvision imported successfully (torchvision_models is presumably set to
# None by an optional-import guard earlier in the file — TODO confirm).
if torchvision_models is not None:
    _register_classification_models()
class ModelRegistry(object):
    """
    Registry class for creating models.

    All registry state lives in the class-level dicts below, so every method
    is a ``@staticmethod`` and the class is used without instantiation.
    """

    _CONSTRUCTORS = {}  # type: Dict[str, Callable]  # key -> wrapped constructor
    _ATTRIBUTES = {}  # type: Dict[str, _ModelAttributes]  # key -> metadata

    @staticmethod
    def available_keys() -> List[str]:
        """
        :return: the keys (models) currently available in the registry
        """
        return list(ModelRegistry._CONSTRUCTORS.keys())

    @staticmethod
    def create(
        key: Optional[str] = None,
        pretrained: Union[bool, str] = False,
        pretrained_path: str = None,
        pretrained_dataset: str = None,
        load_strict: bool = True,
        ignore_error_tensors: List[str] = None,
        **kwargs,
    ) -> Union[Module, Tuple[Module, Optional[str]]]:
        """
        Create a new model for the given key

        :param key: the model key (name) to create. If None, the key is read
            from the state_dict['arch_key'] of the model.
        :param pretrained: True to load pretrained weights; to load a specific version
            give a string with the name of the version (pruned-moderate, base).
            Default None
        :param pretrained_path: A model file path to load into the created model
        :param pretrained_dataset: The dataset to load for the model
        :param load_strict: True to make sure all states are found in and
            loaded in model, False otherwise; default True
        :param ignore_error_tensors: tensors to ignore if there are errors in loading
        :param kwargs: any keyword args to supply to the model constructor
        :raises ValueError: if neither key nor pretrained_path is given, if the
            checkpoint lacks an 'arch_key', or if the key is not registered
        :return: The instantiated model if key is given else a Tuple containing
            the instantiated model and the loaded key
        """
        key_copy = key
        if key_copy is None:
            # no key given: recover the architecture key from the checkpoint
            if pretrained_path is None:
                raise ValueError("Must provide a key or a pretrained_path")
            if pretrained_path.startswith("zoo:"):
                pretrained_path = download_framework_model_by_recipe_type(
                    Model(pretrained_path)
                )
            _checkpoint = torch.load(pretrained_path)
            if "arch_key" in _checkpoint:
                key_copy = _checkpoint["arch_key"]
            else:
                raise ValueError("No `arch_key` found in checkpoint")

        if key_copy not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key_copy, ModelRegistry._CONSTRUCTORS
                )
            )

        model = ModelRegistry._CONSTRUCTORS[key_copy](
            pretrained=pretrained,
            pretrained_path=pretrained_path,
            pretrained_dataset=pretrained_dataset,
            load_strict=load_strict,
            ignore_error_tensors=ignore_error_tensors,
            **kwargs,
        )
        # callers that passed key=None also get the recovered key back
        return (model, key_copy) if key is None else model

    @staticmethod
    def create_zoo_model(
        key: str,
        pretrained: Union[bool, str] = True,
        pretrained_dataset: str = None,
    ) -> Model:
        """
        Create a sparsezoo Model for the desired model in the zoo

        :param key: the model key (name) to retrieve
        :param pretrained: True to load pretrained weights; to load a specific version
            give a string with the name of the version (optim, optim-perf), default True
        :param pretrained_dataset: The dataset to load for the model
        :raises ValueError: if the key is not registered
        :return: the sparsezoo Model reference for the given model
        """
        if key not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key, ModelRegistry._CONSTRUCTORS
                )
            )

        attributes = ModelRegistry._ATTRIBUTES[key]
        # sparse_target is part of the parsed tuple but not used for the stub
        sparse_name, sparse_category, sparse_target = parse_optimization_str(
            pretrained if isinstance(pretrained, str) else attributes.default_desc
        )
        model_dict = {
            "domain": attributes.domain,
            "sub_domain": attributes.sub_domain,
            "architecture": attributes.architecture,
            "sub_architecture": attributes.sub_architecture,
            "framework": PYTORCH_FRAMEWORK,
            "repo": attributes.repo_source,
            "dataset": attributes.default_dataset
            if pretrained_dataset is None
            else pretrained_dataset,
            "sparse_tag": f"{sparse_name}-{sparse_category}",
        }
        stub = model_args_to_stub(**model_dict)
        return Model(stub)

    @staticmethod
    def input_shape(key: str) -> Any:
        """
        :param key: the model key (name) to create
        :raises ValueError: if the key is not registered
        :return: the specified input shape for the model
        """
        if key not in ModelRegistry._CONSTRUCTORS:
            raise ValueError(
                "key {} is not in the model registry; available: {}".format(
                    key, ModelRegistry._CONSTRUCTORS
                )
            )

        return ModelRegistry._ATTRIBUTES[key].input_shape

    @staticmethod
    def register(
        key: Union[str, List[str]],
        input_shape: Any,
        domain: str,
        sub_domain: str,
        architecture: str,
        sub_architecture: str,
        default_dataset: str,
        default_desc: str,
        repo_source: str = "sparseml",
        def_ignore_error_tensors: List[str] = None,
        desc_args: Dict[str, Tuple[str, Any]] = None,
    ):
        """
        Register a model with the registry. Should be used as a decorator

        :param key: the model key (name) to create
        :param input_shape: the specified input shape for the model
        :param domain: the domain the model belongs to; ex: cv, nlp, etc
        :param sub_domain: the sub domain the model belongs to;
            ex: classification, detection, etc
        :param architecture: the architecture the model belongs to;
            ex: resnet, mobilenet, etc
        :param sub_architecture: the sub architecture the model belongs to;
            ex: 50, 101, etc
        :param default_dataset: the dataset to use by default for loading
            pretrained if not supplied
        :param default_desc: the description to use by default for loading
            pretrained if not supplied
        :param repo_source: the source repo for the model, default is sparseml
        :param def_ignore_error_tensors: tensors to ignore if there are
            errors in loading
        :param desc_args: args that should be changed based on the description
        :return: the decorator
        """
        if not isinstance(key, List):
            key = [key]

        def decorator(constructor_func):
            # only the first key is baked into the wrapper; all keys alias it
            wrapped_constructor = ModelRegistry._registered_wrapper(
                key[0],
                constructor_func,
            )
            ModelRegistry.register_wrapped_model_constructor(
                wrapped_constructor,
                key,
                input_shape,
                domain,
                sub_domain,
                architecture,
                sub_architecture,
                default_dataset,
                default_desc,
                repo_source,
                def_ignore_error_tensors,
                desc_args,
            )
            return wrapped_constructor

        return decorator

    @staticmethod
    def register_wrapped_model_constructor(
        wrapped_constructor: Callable,
        key: Union[str, List[str]],
        input_shape: Any,
        domain: str,
        sub_domain: str,
        architecture: str,
        sub_architecture: str,
        default_dataset: str,
        default_desc: str,
        repo_source: str,
        def_ignore_error_tensors: List[str] = None,
        desc_args: Dict[str, Tuple[str, Any]] = None,
    ):
        """
        Register a model with the registry from a model constructor or provider function

        :param wrapped_constructor: Model constructor wrapped to be compatible
            by call from ModelRegistry.create should have pretrained, pretrained_path,
            pretrained_dataset, load_strict, ignore_error_tensors, and kwargs as
            arguments
        :param key: the model key (name) to create
        :param input_shape: the specified input shape for the model
        :param domain: the domain the model belongs to; ex: cv, nlp, etc
        :param sub_domain: the sub domain the model belongs to;
            ex: classification, detection, etc
        :param architecture: the architecture the model belongs to;
            ex: resnet, mobilenet, etc
        :param sub_architecture: the sub architecture the model belongs to;
            ex: 50, 101, etc
        :param default_dataset: the dataset to use by default for loading
            pretrained if not supplied
        :param default_desc: the description to use by default for loading
            pretrained if not supplied
        :param repo_source: the source repo for the model; ex: sparseml, torchvision
        :param def_ignore_error_tensors: tensors to ignore if there are
            errors in loading
        :param desc_args: args that should be changed based on the description
        :raises ValueError: if any of the keys is already registered
        :return: The constructor wrapper registered with the registry
        """
        if not isinstance(key, List):
            key = [key]

        for r_key in key:
            if r_key in ModelRegistry._CONSTRUCTORS:
                # report the specific duplicate key (previously formatted the
                # whole key list into the message)
                raise ValueError("key {} is already registered".format(r_key))
            ModelRegistry._CONSTRUCTORS[r_key] = wrapped_constructor
            ModelRegistry._ATTRIBUTES[r_key] = _ModelAttributes(
                input_shape,
                domain,
                sub_domain,
                architecture,
                sub_architecture,
                default_dataset,
                default_desc,
                repo_source,
                def_ignore_error_tensors,
                desc_args,
            )

    @staticmethod
    def _registered_wrapper(
        key: str,
        constructor_func: Callable,
    ):
        # builds the registry-facing constructor: handles pretrained-weight
        # resolution (local path or SparseZoo download) around constructor_func
        def wrapper(
            pretrained_path: str = None,
            pretrained: Union[bool, str] = False,
            pretrained_dataset: str = None,
            load_strict: bool = True,
            ignore_error_tensors: List[str] = None,
            *args,
            **kwargs,
        ):
            """
            :param pretrained_path: A path to the pretrained weights to load,
                if provided will override the pretrained param. May also be
                a SparseZoo stub path preceded by 'zoo:' with the optional
                `?recipe_type=` argument. If given a recipe type, the base
                model weights for that recipe will be loaded
            :param pretrained: True to load the default pretrained weights,
                a string to load a specific pretrained weight
                (ex: base, optim, optim-perf),
                or False to not load any pretrained weights
            :param pretrained_dataset: The dataset to load pretrained weights for
                (ex: imagenet, mnist, etc).
                If not supplied will default to the one preconfigured for the model.
            :param load_strict: True to raise an error on issues with state dict
                loading from pretrained_path or pretrained, False to ignore
            :param ignore_error_tensors: Tensors to ignore while checking the state dict
                for weights loaded from pretrained_path or pretrained
            """
            attributes = ModelRegistry._ATTRIBUTES[key]

            # some descriptions remap a constructor kwarg (see desc_args)
            if attributes.args and pretrained in attributes.args:
                kwargs[attributes.args[pretrained][0]] = attributes.args[pretrained][1]

            model = constructor_func(*args, **kwargs)
            ignore = []

            if ignore_error_tensors:
                ignore.extend(ignore_error_tensors)
            elif attributes.ignore_error_tensors:
                ignore.extend(attributes.ignore_error_tensors)

            # normalize string flags (e.g. from CLI/config files) to booleans
            if isinstance(pretrained, str):
                if pretrained.lower() == "true":
                    pretrained = True
                elif pretrained.lower() in ["false", "none"]:
                    pretrained = False

            if pretrained_path:
                load_model(pretrained_path, model, load_strict, ignore)
            elif pretrained:
                zoo_model = ModelRegistry.create_zoo_model(
                    key, pretrained, pretrained_dataset
                )
                try:
                    path = download_framework_model_by_recipe_type(zoo_model)
                    load_model(path, model, load_strict, ignore)
                except Exception:
                    # try one more time with overwrite on in case file was corrupted
                    path = download_framework_model_by_recipe_type(zoo_model)
                    load_model(path, model, load_strict, ignore)

            return model

        return wrapper
def _register_classification_models():
    """Register every torchvision classification constructor with ModelRegistry."""
    for name, ctor in getmembers(torchvision_models, isfunction):
        # a "pretrained"/"weights" kwarg marks an actual model-builder function
        # (torchvision 0.13.0 renamed "pretrained" to "weights")
        params = signature(ctor).parameters
        if "pretrained" not in params and "weights" not in params:
            continue

        registry_key = "torchvision.{}".format(name)
        image_shape = (3, 299, 299) if "inception_v3" in name else (3, 224, 224)
        arch, sub_arch = _get_architecture(name)

        # wrap model constructor for registry compatibility, then register
        ModelRegistry.register_wrapped_model_constructor(
            _registry_constructor_wrapper(registry_key, ctor),
            key=registry_key,
            input_shape=image_shape,
            domain="cv",
            sub_domain="classification",
            architecture=arch,
            sub_architecture=sub_arch,
            default_dataset="imagenet",
            default_desc="repo",
            repo_source="torchvision",
        )
21,271 | from typing import Tuple, Union
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
ModuleList,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_conv_linear(mod: Conv2d, stddev: float = 0.1):
# init as defined in the original torchvision setup
import scipy.stats as stats
truncnorm = stats.truncnorm(-2, 2, scale=stddev)
values = torch.as_tensor(truncnorm.rvs(mod.weight.numel()), dtype=mod.weight.dtype)
values = values.view(mod.weight.size())
with torch.no_grad():
mod.weight.copy_(values) | null |
21,272 | from typing import Tuple, Union
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
ModuleList,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,273 | from typing import Tuple, Union
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
MaxPool2d,
Module,
ModuleList,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class InceptionV3(Module):
    """
    InceptionV3 implementation

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param enable_aux: True to enable the aux input for training,
        calculates aux logits from an earlier point in the network
        to enable smoother training as per the original paper
    """

    def __init__(self, num_classes: int, class_type: str, enable_aux: bool):
        super().__init__()
        # stem: plain conv/pool stages downsampling the 299x299 input
        self.section_1 = Sequential(
            _ConvBNRelu(in_channels=3, out_channels=32, kernel_size=3, stride=2)
        )
        self.section_2 = Sequential(
            _ConvBNRelu(in_channels=32, out_channels=32, kernel_size=3),
            _ConvBNRelu(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            MaxPool2d(kernel_size=3, stride=2),
        )
        self.section_3 = Sequential(
            _ConvBNRelu(in_channels=64, out_channels=80, kernel_size=1)
        )
        self.section_4 = Sequential(
            _ConvBNRelu(in_channels=80, out_channels=192, kernel_size=3),
            MaxPool2d(kernel_size=3, stride=2),
        )
        # inception block stacks (A/B/C/D/E variants defined elsewhere in file)
        self.section_5 = Sequential(
            _BlockA(in_channels=192, pool_features=32),
            _BlockA(in_channels=256, pool_features=64),
            _BlockA(in_channels=288, pool_features=64),
        )
        self.section_6 = Sequential(
            _BlockB(in_channels=288),
            _BlockC(in_channels=768, channels_7x7=128),
            _BlockC(in_channels=768, channels_7x7=160),
            _BlockC(in_channels=768, channels_7x7=160),
            _BlockC(in_channels=768, channels_7x7=192),
        )
        self.section_7 = Sequential(
            _BlockD(in_channels=768),
            _BlockE(in_channels=1280),
            _BlockE(in_channels=2048),
        )
        self.classifier = _Classifier(
            in_channels=2048, num_classes=num_classes, class_type=class_type
        )
        # auxiliary head (training aid) fed from section_6's 768-ch output
        self.aux = (
            _Aux(in_channels=768, num_classes=num_classes) if enable_aux else None
        )

    def forward(self, x_tens: Tensor) -> Tuple[Tensor, ...]:
        # Returns (logits, classes) in eval mode or when aux is disabled;
        # (aux, logits, classes) while training with the aux head enabled.
        # N x 3 x 299 x 299
        out = self.section_1(x_tens)
        # N x 32 x 149 x 149
        out = self.section_2(out)
        # N x 64 x 73 x 73
        out = self.section_3(out)
        # N x 80 x 73 x 73
        out = self.section_4(out)
        # N x 192 x 35 x 35
        out = self.section_5(out)
        # N x 288 x 35 x 35
        out = self.section_6(out)
        # N x 768 x 17 x 17
        aux_inp = out  # tap point for the auxiliary classifier
        out = self.section_7(out)
        # N x 2048 x 8 x 8
        logits, classes = self.classifier(out)

        if self.aux is None or not self.training:
            return logits, classes

        aux = self.aux(aux_inp)
        return aux, logits, classes
key=["inceptionv3", "inception_v3", "inception-v3"],
input_shape=(3, 299, 299),
domain="cv",
sub_domain="classification",
architecture="inception_v3",
sub_architecture=None,
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=[
"classifier.fc.weight",
"classifier.fc.bias",
"aux.fc.weight",
"aux.fc.bias",
],
The provided code snippet includes necessary dependencies for implementing the `inception_v3` function. Write a Python function `def inception_v3( num_classes: int = 1000, class_type: str = "single", enable_aux: bool = True ) -> InceptionV3` to solve the following problem:
Standard InceptionV3 implementation; expected input shape is (B, 3, 299, 299) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param enable_aux: True to enable the aux input for training, calculates aux logits from an earlier point in the network to enable smoother training as per the original paper :return: The created InceptionV3 Module
Here is the function:
def inception_v3(
    num_classes: int = 1000, class_type: str = "single", enable_aux: bool = True
) -> InceptionV3:
    """
    Build a standard InceptionV3 network; expects (B, 3, 299, 299) inputs.

    :param num_classes: the number of output classes
    :param class_type: one of [single, multi]; controls the classifier head
    :param enable_aux: when True, attach the auxiliary training head that
        produces extra logits from an intermediate layer (per the paper)
    :return: the constructed InceptionV3 module
    """
    model = InceptionV3(num_classes, class_type, enable_aux)
    return model
21,274 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_conv(conv: Conv2d):
init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu") | null |
21,275 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,276 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
def _init_linear(linear: Linear):
init.normal_(linear.weight, 0, 0.01)
init.constant_(linear.bias, 0) | null |
21,277 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class MobileNet(Module):
    """
    MobileNet implementation

    :param sec_settings: the settings for each section in the MobileNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: dropout level for input to FC layer; setting to None performs
        no dropout; default is None
    """

    def __init__(
        self,
        sec_settings: List[MobileNetSectionSettings],
        num_classes: int,
        class_type: str,
        dropout: Union[float, None] = None,
    ):
        super().__init__()
        self.input = _Input()
        # one Sequential of blocks per section, built from its settings
        self.sections = Sequential(
            *[MobileNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type, dropout
        )

    def forward(self, inp: Tensor):
        out = self.input(inp)
        out = self.sections(out)
        # classifier returns a (logits, classes) pair
        logits, classes = self.classifier(out)

        return logits, classes

    # NOTE(review): likely a @staticmethod upstream (decorators appear
    # stripped by extraction); in-file calls go through the class, which
    # still works without the decorator.
    def create_section(settings: MobileNetSectionSettings) -> Sequential:
        # the first block of a downsampling section strides by 2; the rest use 1
        blocks = []

        in_channels = settings.in_channels
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            blocks.append(_Block(in_channels, settings.out_channels, stride))
            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
def _mobilenet_base_section_settings():
    """Section layout for the width-1.0 MobileNet-v1 body (five sections)."""
    # (num_blocks, in_channels, out_channels, downsample)
    layout = [
        (1, 32, 64, False),
        (2, 64, 128, True),
        (2, 128, 256, True),
        (6, 256, 512, True),
        (2, 512, 1024, True),
    ]
    return [
        MobileNetSectionSettings(
            num_blocks=blocks, in_channels=c_in, out_channels=c_out, downsample=down
        )
        for blocks, c_in, c_out, down in layout
    ]
key=[
"mobilenet",
"mobilenet_100",
"mobilenet-v1",
"mobilenet-v1-100",
"mobilenet_v1",
"mobilenet_v1_100",
"mobilenetv1_1.0",
],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="mobilenet_v1",
sub_architecture="1.0",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `mobilenet` function. Write a Python function `def mobilenet(num_classes: int = 1000, class_type: str = "single") -> MobileNet` to solve the following problem:
Standard MobileNet implementation with width=1.0; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def mobilenet(num_classes: int = 1000, class_type: str = "single") -> MobileNet:
    """
    Build the standard width-1.0 MobileNet; expects (B, 3, 224, 224) inputs.

    :param num_classes: the number of output classes
    :param class_type: one of [single, multi]; controls the classifier head
    :return: the constructed MobileNet module
    """
    return MobileNet(_mobilenet_base_section_settings(), num_classes, class_type)
21,278 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU
class MobileNet(Module):
    """
    MobileNet implementation

    :param sec_settings: the settings for each section in the MobileNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: dropout level for input to FC layer; setting to None performs
        no dropout; default is None
    """

    def __init__(
        self,
        sec_settings: List[MobileNetSectionSettings],
        num_classes: int,
        class_type: str,
        dropout: Union[float, None] = None,
    ):
        super().__init__()
        self.input = _Input()
        # body: each section's settings expand into a Sequential of blocks
        self.sections = Sequential(
            *[MobileNet.create_section(settings) for settings in sec_settings]
        )
        # the classifier consumes the final section's channel count
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type, dropout
        )

    def forward(self, inp: Tensor):
        out = self.input(inp)
        out = self.sections(out)
        # classifier head yields (logits, classes)
        logits, classes = self.classifier(out)

        return logits, classes

    # NOTE(review): presumably @staticmethod in the upstream source
    # (decorators appear stripped by extraction); class-level calls above
    # work either way.
    def create_section(settings: MobileNetSectionSettings) -> Sequential:
        # stride 2 on the first block when the section downsamples, else 1
        blocks = []

        in_channels = settings.in_channels
        stride = 2 if settings.downsample else 1

        for _ in range(settings.num_blocks):
            blocks.append(_Block(in_channels, settings.out_channels, stride))
            in_channels = settings.out_channels
            stride = 1

        return Sequential(*blocks)
def _mobilenet_base_section_settings():
    """MobileNet-v1 (width 1.0) body: five sections of blocks."""
    num_blocks = [1, 2, 2, 6, 2]
    channels = [32, 64, 128, 256, 512, 1024]
    downsample = [False, True, True, True, True]
    return [
        MobileNetSectionSettings(
            num_blocks=blocks,
            in_channels=c_in,
            out_channels=c_out,
            downsample=down,
        )
        for blocks, c_in, c_out, down in zip(
            num_blocks, channels[:-1], channels[1:], downsample
        )
    ]
key=[
"mobilenet",
"mobilenet_100",
"mobilenet-v1",
"mobilenet-v1-100",
"mobilenet_v1",
"mobilenet_v1_100",
"mobilenetv1_1.0",
],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="mobilenet_v1",
sub_architecture="1.0",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `han_mobilenet` function. Write a Python function `def han_mobilenet(num_classes: int = 1000, class_type: str = "single") -> MobileNet` to solve the following problem:
Standard MobileNet implementation with width=1.0; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def han_mobilenet(num_classes: int = 1000, class_type: str = "single") -> MobileNet:
    """
    Standard MobileNet implementation with width=1.0;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created MobileNet Module
    """
    # Base (width 1.0) section layout with a 0.2 classifier dropout.
    return MobileNet(
        _mobilenet_base_section_settings(), num_classes, class_type, dropout=0.2
    )
21,279 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
def _init_conv(conv: Conv2d):
init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu") | null |
21,280 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,281 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
def _init_linear(linear: Linear):
init.normal_(linear.weight, 0, 0.01)
init.constant_(linear.bias, 0) | null |
21,282 | from typing import List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import Hardswish
class DarkNetSectionSettings(object):
    """
    Configuration for one section of a DarkNet-based architecture.

    :param num_blocks: number of residual blocks in the section
    :param in_channels: input channel count for the section
    :param hidden_channels: hidden channel count inside each residual block
    :param out_channels: output channel count of each residual block
    :param downsample_out_channels: output channels for an optional trailing
        downsample convolution layer; None omits that layer. Default is None.
    """

    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        hidden_channels: int,
        out_channels: int,
        downsample_out_channels: Union[int, None] = None,
    ):
        # Plain value holder; attributes mirror the constructor arguments.
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.downsample_out_channels = downsample_out_channels
class DarkNet(Module):
    """
    DarkNet implementation

    :param sec_settings: the settings for each section in the DarkNet model
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    """

    def __init__(
        self,
        sec_settings: List[DarkNetSectionSettings],
        num_classes: int,
        class_type: str,
    ):
        super().__init__()
        self.input = _Input()
        self.sections = Sequential(
            *[DarkNet.create_section(settings) for settings in sec_settings]
        )
        self.classifier = _Classifier(
            sec_settings[-1].out_channels, num_classes, class_type
        )
        # Empty list => run as a classifier; non-empty => section indices whose
        # residual outputs are returned as backbone features (see
        # as_yolo_backbone / as_classifier).
        self.backbone_outputs = []

    def forward(self, inp: Tensor) -> Union[Tensor, List[Tensor]]:
        """
        :param inp: input image batch
        :return: (logits, classes) when configured as a classifier, or the
            list of backbone feature maps selected via as_yolo_backbone
        """
        out = self.input(inp)
        if self.backbone_outputs:
            outputs = {}
            for idx, section in enumerate(self.sections):
                if idx not in self.backbone_outputs:
                    out = section(out)
                    continue
                if isinstance(section[-1], _DownsampleBlock):
                    # Capture the pre-downsample residual output as the
                    # feature, then still run the downsample block so later
                    # sections see the expected resolution/channels.
                    for block in section[:-1]:
                        out = block(out)
                    outputs[idx] = out  # return residual output as backbone feature
                    out = section[-1](out)  # run downsample block
                else:
                    out = section(out)
                    outputs[idx] = out
            # return outputs in order of self.backbone_outputs indices
            return [outputs[sec_idx] for sec_idx in self.backbone_outputs]
        else:  # defaults to classifier
            out = self.sections(out)
            logits, classes = self.classifier(out)
            return logits, classes

    def as_yolo_backbone(self, output_blocks: List[int] = None):
        """
        Sets this model to output the given residual block indices as a backbone
        feature extractor for a detection model such as Yolo.

        :param output_blocks: indices of residual DarkNet blocks to output as
            backbone. Default is the final block output.
        :raises ValueError: if any index is out of range for this model's sections
        """
        if output_blocks is None:
            output_blocks = [-1]
        num_sections = len(self.sections)
        for idx in output_blocks:
            if idx < -num_sections or idx >= num_sections:
                raise ValueError(
                    "Index {} out of range for DarkNet model with {} sections".format(
                        idx, num_sections
                    )
                )
        # BUG FIX: negative indices must be converted relative to the number of
        # sections, not the number of requested outputs. The previous
        # `idx + len(output_blocks)` mapped the default [-1] to section 0
        # instead of the final section, contradicting this method's docstring.
        output_blocks = [  # convert negative indices
            idx if idx >= 0 else idx + num_sections for idx in output_blocks
        ]
        self.backbone_outputs = output_blocks

    def as_classifier(self):
        """
        Sets this model to return output as an image classifier through a final FC layer
        """
        self.backbone_outputs = []

    @staticmethod
    def create_section(settings: DarkNetSectionSettings) -> Sequential:
        """Build one DarkNet section: ``num_blocks`` residual blocks plus an
        optional trailing downsample convolution."""
        blocks = []
        for _ in range(settings.num_blocks):
            blocks.append(
                _ResidualBlock(
                    settings.in_channels,
                    settings.hidden_channels,
                    settings.out_channels,
                )
            )
        if settings.downsample_out_channels is not None:
            blocks.append(
                _DownsampleBlock(
                    settings.out_channels, settings.downsample_out_channels
                )
            )
        return Sequential(*blocks)
key=["darknet53", "darknet_53", "darknet-53"],
input_shape=(3, 256, 256),
domain="cv",
sub_domain="classification",
architecture="darknet",
sub_architecture="53",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `darknet53` function. Write a Python function `def darknet53(num_classes: int = 1000, class_type: str = "single") -> DarkNet` to solve the following problem:
DarkNet-53 implementation as described in the Yolo v3 paper; expected input shape is (B, 3, 256, 256) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created DarkNet Module
Here is the function:
def darknet53(num_classes: int = 1000, class_type: str = "single") -> DarkNet:
    """
    DarkNet-53 implementation as described in the Yolo v3 paper;
    expected input shape is (B, 3, 256, 256)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created DarkNet Module
    """
    # (num_blocks, in_channels, hidden_channels, out_channels, downsample_out_channels)
    layout = [
        (1, 64, 32, 64, 128),
        (2, 128, 64, 128, 256),
        (8, 256, 128, 256, 512),
        (8, 512, 256, 512, 1024),
        (4, 1024, 512, 1024, None),  # final section has no downsample conv
    ]
    return DarkNet(
        sec_settings=[DarkNetSectionSettings(*cfg) for cfg in layout],
        num_classes=num_classes,
        class_type=class_type,
    )
21,283 | from collections import OrderedDict
from typing import Dict, List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU6
def _init_conv(conv: Conv2d):
init.kaiming_normal_(conv.weight, mode="fan_out", nonlinearity="relu") | null |
21,284 | from collections import OrderedDict
from typing import Dict, List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU6
def _init_batch_norm(norm: BatchNorm2d, weight_const: float = 1.0):
init.constant_(norm.weight, weight_const)
init.constant_(norm.bias, 0.0) | null |
21,285 | from collections import OrderedDict
from typing import Dict, List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU6
def _init_linear(linear: Linear):
init.normal_(linear.weight, 0, 0.01)
init.constant_(linear.bias, 0) | null |
21,286 | from collections import OrderedDict
from typing import Dict, List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU6
def _make_divisible(
value: float, divisor: int, min_value: Union[int, None] = None
) -> int:
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
if new_value < 0.9 * value:
new_value += divisor
return new_value | null |
21,287 | from collections import OrderedDict
from typing import Dict, List, Union
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
Softmax,
init,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import ReLU6
class MobilenetV2(Module):
    """
    Standard MobileNetV2 model https://arxiv.org/abs/1801.04381
    """
    def __init__(
        self,
        sec_settings: List[MobilenetV2SectionSettings],
        num_classes: int,
        class_type: str,
    ):
        """
        :param sec_settings: the settings for each section in the mobilenet model
        :param num_classes: the number of classes to classify
        :param class_type: one of [single, multi] to support multi class training;
            default single
        """
        super().__init__()
        # Body: one Sequential of inverted-residual blocks per section config.
        self.sections = Sequential(
            *[MobilenetV2.create_section(settings) for settings in sec_settings]
        )
        # Head: 1x1 conv expanding the last section's channels to 1280
        # (conv -> BN -> ReLU6) before classification.
        self.feat_extraction = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=sec_settings[-1].out_channels,
                            out_channels=1280,
                            kernel_size=1,
                            bias=False,
                        ),
                    ),
                    ("bn", BatchNorm2d(1280)),
                    ("act", ReLU6(num_channels=1280, inplace=True)),
                ]
            )
        )
        self.classifier = _Classifier(
            in_channels=1280, num_classes=num_classes, class_type=class_type
        )
    def forward(self, inp: Tensor):
        # body -> 1280-channel feature head -> classifier; the classifier
        # returns the (logits, classes) pair that this method forwards.
        out = self.sections(inp)
        out = self.feat_extraction(out)
        logits, classes = self.classifier(out)
        return logits, classes
    # NOTE(review): defined without self/@staticmethod; it is only ever called
    # as MobilenetV2.create_section(...) from __init__, which works, but an
    # instance call would mis-bind -- a @staticmethod decorator was likely
    # lost during extraction.
    def create_section(settings: MobilenetV2SectionSettings) -> Sequential:
        """Build one section; only the first block may downsample, change the
        channel count, or (for the init section) use explicit expand kwargs.
        in_channels / exp_channels / stride / apply_exp_kwargs are loop-carried
        and reset after the first iteration."""
        blocks = []
        in_channels = settings.in_channels
        stride = 2 if settings.downsample else 1
        # presumably init_exp_channels/exp_channels are pre-scaled by the
        # section's width_mult -- defined outside this chunk; verify there.
        exp_channels = settings.init_exp_channels
        apply_exp_kwargs = settings.init_section
        for _ in range(settings.num_blocks):
            if apply_exp_kwargs:
                # Init section: the expansion conv itself acts as a stride-2
                # 3x3 stem rather than the usual 1x1 pointwise expansion.
                blocks.append(
                    _InvertedResidualBlock(
                        in_channels,
                        settings.out_channels,
                        exp_channels,
                        stride,
                        expand_kwargs={"kernel_size": 3, "padding": 1, "stride": 2},
                    )
                )
            else:
                blocks.append(
                    _InvertedResidualBlock(
                        in_channels, settings.out_channels, exp_channels, stride
                    )
                )
            # After block 0: identity-shaped blocks at stride 1, standard
            # expansion channels, no expand kwargs.
            in_channels = settings.out_channels
            exp_channels = settings.exp_channels
            stride = 1
            apply_exp_kwargs = False
        return Sequential(*blocks)
def mobilenet_v2_width(
    width_mult: float, num_classes: int = 1000, class_type: str = "single"
) -> MobilenetV2:
    """
    Standard MobileNet V2 implementation for a width multiplier;
    expected input shape is (B, 3, 224, 224)

    :param width_mult: the width multiplier to apply
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created MobileNet Module
    """
    # The stem section is special: it expands via explicit exp_channels and is
    # marked as the init section; all later sections use exp_ratio=6.
    stem = MobilenetV2SectionSettings(
        num_blocks=1,
        in_channels=3,
        out_channels=16,
        exp_channels=32,
        downsample=False,
        init_section=True,
        width_mult=width_mult,
    )
    # (num_blocks, in_channels, out_channels, downsample) for the body sections
    body_layout = [
        (2, 16, 24, True),
        (3, 24, 32, True),
        (4, 32, 64, True),
        (3, 64, 96, False),
        (3, 96, 160, True),
        (1, 160, 320, False),
    ]
    body = [
        MobilenetV2SectionSettings(
            num_blocks=blocks,
            in_channels=in_ch,
            out_channels=out_ch,
            exp_ratio=6,
            downsample=down,
            init_section=False,
            width_mult=width_mult,
        )
        for blocks, in_ch, out_ch, down in body_layout
    ]
    return MobilenetV2([stem] + body, num_classes, class_type)
key=[
"mobilenetv2",
"mobilenet_v2",
"mobilenet_v2_100",
"mobilenet-v2",
"mobilenet-v2-100",
"mobilenetv2_1.0",
],
input_shape=(3, 224, 224),
domain="cv",
sub_domain="classification",
architecture="mobilenet_v2",
sub_architecture="1.0",
default_dataset="imagenet",
default_desc="base",
def_ignore_error_tensors=["classifier.fc.weight", "classifier.fc.bias"],
The provided code snippet includes necessary dependencies for implementing the `mobilenet_v2` function. Write a Python function `def mobilenet_v2(num_classes: int = 1000, class_type: str = "single") -> MobilenetV2` to solve the following problem:
Standard MobileNet V2 implementation for a width multiplier; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :return: The created MobileNet Module
Here is the function:
def mobilenet_v2(num_classes: int = 1000, class_type: str = "single") -> MobilenetV2:
    """
    Standard MobileNet V2 (width multiplier 1.0);
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created MobileNet Module
    """
    return mobilenet_v2_width(1.0, num_classes=num_classes, class_type=class_type)
21,288 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
def _efficient_net_params(model_name):
# Coefficients: width, depth, dropout, in_size
params_dict = {
"efficientnet_b0": (1.0, 1.0, 0.2, 224),
"efficientnet_b1": (1.0, 1.1, 0.2, 240),
"efficientnet_b2": (1.1, 1.2, 0.3, 288),
"efficientnet_b3": (1.2, 1.4, 0.3, 300),
"efficientnet_b4": (1.4, 1.8, 0.4, 380),
"efficientnet_b5": (1.6, 2.2, 0.4, 456),
"efficientnet_b6": (1.8, 2.6, 0.5, 528),
"efficientnet_b7": (2.0, 3.1, 0.5, 600),
}
return params_dict[model_name] | null |
21,289 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
    """
    EfficientNet implementation
    :param sec_settings: the settings for each section in the EfficientNet model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional extra keyword arguments forwarded to every
        BatchNorm2d layer (e.g. eps, momentum); None uses BatchNorm defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # Local non-None copy for the stem BN; the possibly-None bn_kwargs is
        # still what gets forwarded to the sections and classifier below.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB into the first section's channels,
        # followed by BN and SiLU ("swish") activation.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        # stem -> MBConv sections -> classifier head; the head produces the
        # (logits, classes) pair returned here.
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): defined without self/@staticmethod; only ever called as
    # EfficientNet.create_section(...) from __init__, which works, but an
    # instance call would mis-bind -- a @staticmethod decorator was likely
    # lost during extraction.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """Build one section of MBConv (or fused-MBConv) blocks. Only the
        first block of a section changes the channel count or uses the
        section stride; all later blocks are stride-1 identity-shaped."""
        # NOTE(review): assert is stripped under `python -O`; input validation
        # would be safer as an explicit ValueError.
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            if settings.is_fused:
                # Fused variant: expansion and depthwise conv merged into one
                # regular conv; no squeeze-excite parameters taken.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
def _create_section_settings(
    width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
    """
    Build the seven standard EfficientNet section configurations, scaled by
    the given width/depth multipliers, plus the scaled head channel count.

    :param width_mult: multiplier applied to every channel count
    :param depth_mult: multiplier applied to every block count
    :param se_mod: whether squeeze-excite is moved to the end of each block
    :return: tuple of (section settings list, classifier input channel count)
    """
    # Baseline B0 layout, one row per section:
    # (num_blocks, in_channels, out_channels, kernel_size, expansion_ratio, stride)
    layout = [
        (1, 32, 16, 3, 1, 1),
        (2, 16, 24, 3, 6, 2),
        (2, 24, 40, 5, 6, 2),
        (3, 40, 80, 3, 6, 2),
        (3, 80, 112, 5, 6, 1),
        (4, 112, 192, 5, 6, 2),
        (1, 192, 320, 3, 6, 1),
    ]
    settings = [
        EfficientNetSectionSettings(
            num_blocks=_scale_num_blocks(blocks, depth_mult),
            in_channels=_scale_num_channels(in_ch, width_mult),
            out_channels=_scale_num_channels(out_ch, width_mult),
            kernel_size=kernel,
            expansion_ratio=exp_ratio,
            stride=stride,
            se_ratio=0.25,
            se_mod=se_mod,
        )
        for blocks, in_ch, out_ch, kernel, exp_ratio, stride in layout
    ]
    return settings, _scale_num_channels(1280, width_mult)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b0` function. Write a Python function `def efficientnet_b0( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.2, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B0 implementation; expected input shape is (B, 3, 224, 224) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B0 Module
Here is the function:
def efficientnet_b0(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.2,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNet B0 implementation; expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1)
    :return: The created EfficientNet B0 Module
    """
    # B0 is the unscaled baseline: width and depth multipliers are both 1.0.
    sec_settings, out_channels = _create_section_settings(1.0, 1.0, se_mod)
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=out_channels,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
    )
21,290 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
    """
    EfficientNet implementation
    :param sec_settings: the settings for each section in the EfficientNet model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional extra keyword arguments forwarded to every
        BatchNorm2d layer (e.g. eps, momentum); None uses BatchNorm defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # Local non-None copy for the stem BN; the possibly-None bn_kwargs is
        # still what gets forwarded to the sections and classifier below.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB into the first section's channels,
        # followed by BN and SiLU ("swish") activation.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        # stem -> MBConv sections -> classifier head; the head produces the
        # (logits, classes) pair returned here.
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): defined without self/@staticmethod; only ever called as
    # EfficientNet.create_section(...) from __init__, which works, but an
    # instance call would mis-bind -- a @staticmethod decorator was likely
    # lost during extraction.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """Build one section of MBConv (or fused-MBConv) blocks. Only the
        first block of a section changes the channel count or uses the
        section stride; all later blocks are stride-1 identity-shaped."""
        # NOTE(review): assert is stripped under `python -O`; input validation
        # would be safer as an explicit ValueError.
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            if settings.is_fused:
                # Fused variant: expansion and depthwise conv merged into one
                # regular conv; no squeeze-excite parameters taken.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
def _create_section_settings(
    width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
    """
    Build the seven standard EfficientNet section configurations, scaled by
    the given width/depth multipliers, plus the scaled head channel count.

    :param width_mult: multiplier applied to every channel count
    :param depth_mult: multiplier applied to every block count
    :param se_mod: whether squeeze-excite is moved to the end of each block
    :return: tuple of (section settings list, classifier input channel count)
    """
    # Baseline B0 layout, one row per section:
    # (num_blocks, in_channels, out_channels, kernel_size, expansion_ratio, stride)
    layout = [
        (1, 32, 16, 3, 1, 1),
        (2, 16, 24, 3, 6, 2),
        (2, 24, 40, 5, 6, 2),
        (3, 40, 80, 3, 6, 2),
        (3, 80, 112, 5, 6, 1),
        (4, 112, 192, 5, 6, 2),
        (1, 192, 320, 3, 6, 1),
    ]
    settings = [
        EfficientNetSectionSettings(
            num_blocks=_scale_num_blocks(blocks, depth_mult),
            in_channels=_scale_num_channels(in_ch, width_mult),
            out_channels=_scale_num_channels(out_ch, width_mult),
            kernel_size=kernel,
            expansion_ratio=exp_ratio,
            stride=stride,
            se_ratio=0.25,
            se_mod=se_mod,
        )
        for blocks, in_ch, out_ch, kernel, exp_ratio, stride in layout
    ]
    return settings, _scale_num_channels(1280, width_mult)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b1` function. Write a Python function `def efficientnet_b1( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.2, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B1 implementation; expected input shape is (B, 3, 240, 240) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B1 Module
Here is the function:
def efficientnet_b1(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.2,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNet B1 implementation; expected input shape is (B, 3, 240, 240)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1)
    :return: The created EfficientNet B1 Module
    """
    # B1 keeps B0's width (1.0) and scales depth by 1.1 per the EfficientNet
    # compound-scaling table (Tan & Le, 2019).
    width_mult = 1.0
    depth_mult = 1.1
    sec_settings, out_channels = _create_section_settings(
        width_mult, depth_mult, se_mod
    )
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=out_channels,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
    )
21,291 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
    """
    EfficientNet implementation
    :param sec_settings: the settings for each section in the EfficientNet model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional extra keyword arguments forwarded to every
        BatchNorm2d layer (e.g. eps, momentum); None uses BatchNorm defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # Local non-None copy for the stem BN; the possibly-None bn_kwargs is
        # still what gets forwarded to the sections and classifier below.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB into the first section's channels,
        # followed by BN and SiLU ("swish") activation.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        # stem -> MBConv sections -> classifier head; the head produces the
        # (logits, classes) pair returned here.
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): defined without self/@staticmethod; only ever called as
    # EfficientNet.create_section(...) from __init__, which works, but an
    # instance call would mis-bind -- a @staticmethod decorator was likely
    # lost during extraction.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """Build one section of MBConv (or fused-MBConv) blocks. Only the
        first block of a section changes the channel count or uses the
        section stride; all later blocks are stride-1 identity-shaped."""
        # NOTE(review): assert is stripped under `python -O`; input validation
        # would be safer as an explicit ValueError.
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            if settings.is_fused:
                # Fused variant: expansion and depthwise conv merged into one
                # regular conv; no squeeze-excite parameters taken.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
def _create_section_settings(
width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
# return section settings as well as the out channels as tuple
return (
[
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(32, width_mult),
out_channels=_scale_num_channels(16, width_mult),
kernel_size=3,
expansion_ratio=1,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(16, width_mult),
out_channels=_scale_num_channels(24, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(24, width_mult),
out_channels=_scale_num_channels(40, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(40, width_mult),
out_channels=_scale_num_channels(80, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(80, width_mult),
out_channels=_scale_num_channels(112, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(4, depth_mult),
in_channels=_scale_num_channels(112, width_mult),
out_channels=_scale_num_channels(192, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(192, width_mult),
out_channels=_scale_num_channels(320, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
],
_scale_num_channels(1280, width_mult),
)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b2` function. Write a Python function `def efficientnet_b2( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.3, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B2 implementation; expected input shape is (B, 3, 260, 260) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B2 Module
Here is the function:
def efficientnet_b2(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.3,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B2 implementation; expected input shape is (B, 3, 260, 260)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
:return: The created EfficientNet B2 Module
"""
width_mult = 1.1
depth_mult = 1.2
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
) | EfficientNet B2 implementation; expected input shape is (B, 3, 260, 260) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B2 Module |
21,292 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
"""
EfficientNet implementation
:param sec_settings: the settings for each section in the vgg model
:param out_channels: the number of output channels in the classifier before the fc
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
"""
def __init__(
self,
sec_settings: List[EfficientNetSectionSettings],
out_channels: int,
num_classes: int,
class_type: str,
dropout: float,
bn_kwargs: Optional[Mapping[str, Any]] = None,
):
super().__init__()
_bn_kwargs = bn_kwargs or {}
self.input = Sequential(
OrderedDict(
[
(
"conv",
Conv2d(
in_channels=3,
out_channels=sec_settings[0].in_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
),
(
"bn",
BatchNorm2d(
num_features=sec_settings[0].in_channels, **_bn_kwargs
),
),
("act", SiLU()),
]
)
)
self.sections = Sequential(
*[
EfficientNet.create_section(settings, bn_kwargs)
for settings in sec_settings
]
)
self.classifier = _Classifier(
in_channels=sec_settings[-1].out_channels,
out_channels=out_channels,
classes=num_classes,
dropout=dropout,
class_type=class_type,
bn_kwargs=bn_kwargs,
)
def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
feat = self.input(inp)
feat = self.sections(feat)
logits, classes = self.classifier(feat)
return logits, classes
def create_section(
settings: EfficientNetSectionSettings,
bn_kwargs: Optional[Mapping[str, Any]] = None,
) -> Sequential:
assert settings.num_blocks > 0
in_channels = settings.in_channels
stride = settings.stride
blocks = []
for block in range(settings.num_blocks):
if settings.is_fused:
blocks.append(
_FusedInvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
bn_kwargs=bn_kwargs,
)
)
else:
blocks.append(
_InvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
se_ratio=settings.se_ratio,
se_mod=settings.se_mod,
bn_kwargs=bn_kwargs,
)
)
return Sequential(*blocks)
def _create_section_settings(
width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
# return section settings as well as the out channels as tuple
return (
[
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(32, width_mult),
out_channels=_scale_num_channels(16, width_mult),
kernel_size=3,
expansion_ratio=1,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(16, width_mult),
out_channels=_scale_num_channels(24, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(24, width_mult),
out_channels=_scale_num_channels(40, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(40, width_mult),
out_channels=_scale_num_channels(80, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(80, width_mult),
out_channels=_scale_num_channels(112, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(4, depth_mult),
in_channels=_scale_num_channels(112, width_mult),
out_channels=_scale_num_channels(192, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(192, width_mult),
out_channels=_scale_num_channels(320, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
],
_scale_num_channels(1280, width_mult),
)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b3` function. Write a Python function `def efficientnet_b3( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.3, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B3 implementation; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B3 Module
Here is the function:
def efficientnet_b3(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.3,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B3 implementation; expected input shape is (B, 3, 300, 300)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
:return: The created EfficientNet B3 Module
"""
width_mult = 1.2
depth_mult = 1.4
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
) | EfficientNet B3 implementation; expected input shape is (B, 3, 300, 300) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B3 Module |
21,293 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
"""
EfficientNet implementation
:param sec_settings: the settings for each section in the vgg model
:param out_channels: the number of output channels in the classifier before the fc
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
"""
def __init__(
self,
sec_settings: List[EfficientNetSectionSettings],
out_channels: int,
num_classes: int,
class_type: str,
dropout: float,
bn_kwargs: Optional[Mapping[str, Any]] = None,
):
super().__init__()
_bn_kwargs = bn_kwargs or {}
self.input = Sequential(
OrderedDict(
[
(
"conv",
Conv2d(
in_channels=3,
out_channels=sec_settings[0].in_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
),
(
"bn",
BatchNorm2d(
num_features=sec_settings[0].in_channels, **_bn_kwargs
),
),
("act", SiLU()),
]
)
)
self.sections = Sequential(
*[
EfficientNet.create_section(settings, bn_kwargs)
for settings in sec_settings
]
)
self.classifier = _Classifier(
in_channels=sec_settings[-1].out_channels,
out_channels=out_channels,
classes=num_classes,
dropout=dropout,
class_type=class_type,
bn_kwargs=bn_kwargs,
)
def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
feat = self.input(inp)
feat = self.sections(feat)
logits, classes = self.classifier(feat)
return logits, classes
def create_section(
settings: EfficientNetSectionSettings,
bn_kwargs: Optional[Mapping[str, Any]] = None,
) -> Sequential:
assert settings.num_blocks > 0
in_channels = settings.in_channels
stride = settings.stride
blocks = []
for block in range(settings.num_blocks):
if settings.is_fused:
blocks.append(
_FusedInvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
bn_kwargs=bn_kwargs,
)
)
else:
blocks.append(
_InvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
se_ratio=settings.se_ratio,
se_mod=settings.se_mod,
bn_kwargs=bn_kwargs,
)
)
return Sequential(*blocks)
def _create_section_settings(
width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
# return section settings as well as the out channels as tuple
return (
[
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(32, width_mult),
out_channels=_scale_num_channels(16, width_mult),
kernel_size=3,
expansion_ratio=1,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(16, width_mult),
out_channels=_scale_num_channels(24, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(24, width_mult),
out_channels=_scale_num_channels(40, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(40, width_mult),
out_channels=_scale_num_channels(80, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(80, width_mult),
out_channels=_scale_num_channels(112, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(4, depth_mult),
in_channels=_scale_num_channels(112, width_mult),
out_channels=_scale_num_channels(192, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(192, width_mult),
out_channels=_scale_num_channels(320, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
],
_scale_num_channels(1280, width_mult),
)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b4` function. Write a Python function `def efficientnet_b4( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.4, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B4 implementation; expected input shape is (B, 3, 380, 380) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B4 Module
Here is the function:
def efficientnet_b4(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.4,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B4 implementation; expected input shape is (B, 3, 380, 380)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
:return: The created EfficientNet B4 Module
"""
width_mult = 1.4
depth_mult = 1.8
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
) | EfficientNet B4 implementation; expected input shape is (B, 3, 380, 380) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B4 Module |
21,294 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
"""
EfficientNet implementation
:param sec_settings: the settings for each section in the vgg model
:param out_channels: the number of output channels in the classifier before the fc
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
"""
def __init__(
self,
sec_settings: List[EfficientNetSectionSettings],
out_channels: int,
num_classes: int,
class_type: str,
dropout: float,
bn_kwargs: Optional[Mapping[str, Any]] = None,
):
super().__init__()
_bn_kwargs = bn_kwargs or {}
self.input = Sequential(
OrderedDict(
[
(
"conv",
Conv2d(
in_channels=3,
out_channels=sec_settings[0].in_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
),
(
"bn",
BatchNorm2d(
num_features=sec_settings[0].in_channels, **_bn_kwargs
),
),
("act", SiLU()),
]
)
)
self.sections = Sequential(
*[
EfficientNet.create_section(settings, bn_kwargs)
for settings in sec_settings
]
)
self.classifier = _Classifier(
in_channels=sec_settings[-1].out_channels,
out_channels=out_channels,
classes=num_classes,
dropout=dropout,
class_type=class_type,
bn_kwargs=bn_kwargs,
)
def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
feat = self.input(inp)
feat = self.sections(feat)
logits, classes = self.classifier(feat)
return logits, classes
def create_section(
settings: EfficientNetSectionSettings,
bn_kwargs: Optional[Mapping[str, Any]] = None,
) -> Sequential:
assert settings.num_blocks > 0
in_channels = settings.in_channels
stride = settings.stride
blocks = []
for block in range(settings.num_blocks):
if settings.is_fused:
blocks.append(
_FusedInvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
bn_kwargs=bn_kwargs,
)
)
else:
blocks.append(
_InvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
se_ratio=settings.se_ratio,
se_mod=settings.se_mod,
bn_kwargs=bn_kwargs,
)
)
return Sequential(*blocks)
def _create_section_settings(
width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
# return section settings as well as the out channels as tuple
return (
[
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(32, width_mult),
out_channels=_scale_num_channels(16, width_mult),
kernel_size=3,
expansion_ratio=1,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(16, width_mult),
out_channels=_scale_num_channels(24, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(24, width_mult),
out_channels=_scale_num_channels(40, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(40, width_mult),
out_channels=_scale_num_channels(80, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(80, width_mult),
out_channels=_scale_num_channels(112, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(4, depth_mult),
in_channels=_scale_num_channels(112, width_mult),
out_channels=_scale_num_channels(192, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(192, width_mult),
out_channels=_scale_num_channels(320, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
],
_scale_num_channels(1280, width_mult),
)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b5` function. Write a Python function `def efficientnet_b5( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.4, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B5 implementation; expected input shape is (B, 3, 456, 456) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B5 Module
Here is the function:
def efficientnet_b5(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.4,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B5 implementation; expected input shape is (B, 3, 456, 456)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
:return: The created EfficientNet B5 Module
"""
width_mult = 1.6
depth_mult = 2.2
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
bn_kwargs={"eps": 1.0e-03, "momentum": 0.01},
) | EfficientNet B5 implementation; expected input shape is (B, 3, 456, 456) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B5 Module |
21,295 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
"""
EfficientNet implementation
:param sec_settings: the settings for each section in the vgg model
:param out_channels: the number of output channels in the classifier before the fc
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
"""
def __init__(
self,
sec_settings: List[EfficientNetSectionSettings],
out_channels: int,
num_classes: int,
class_type: str,
dropout: float,
bn_kwargs: Optional[Mapping[str, Any]] = None,
):
super().__init__()
_bn_kwargs = bn_kwargs or {}
self.input = Sequential(
OrderedDict(
[
(
"conv",
Conv2d(
in_channels=3,
out_channels=sec_settings[0].in_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
),
(
"bn",
BatchNorm2d(
num_features=sec_settings[0].in_channels, **_bn_kwargs
),
),
("act", SiLU()),
]
)
)
self.sections = Sequential(
*[
EfficientNet.create_section(settings, bn_kwargs)
for settings in sec_settings
]
)
self.classifier = _Classifier(
in_channels=sec_settings[-1].out_channels,
out_channels=out_channels,
classes=num_classes,
dropout=dropout,
class_type=class_type,
bn_kwargs=bn_kwargs,
)
def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
feat = self.input(inp)
feat = self.sections(feat)
logits, classes = self.classifier(feat)
return logits, classes
def create_section(
settings: EfficientNetSectionSettings,
bn_kwargs: Optional[Mapping[str, Any]] = None,
) -> Sequential:
assert settings.num_blocks > 0
in_channels = settings.in_channels
stride = settings.stride
blocks = []
for block in range(settings.num_blocks):
if settings.is_fused:
blocks.append(
_FusedInvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
bn_kwargs=bn_kwargs,
)
)
else:
blocks.append(
_InvertedBottleneckBlock(
in_channels=in_channels
if block == 0
else settings.out_channels,
out_channels=settings.out_channels,
kernel_size=settings.kernel_size,
expansion_ratio=settings.expansion_ratio,
stride=stride if block == 0 else 1,
se_ratio=settings.se_ratio,
se_mod=settings.se_mod,
bn_kwargs=bn_kwargs,
)
)
return Sequential(*blocks)
def _create_section_settings(
width_mult: float, depth_mult: float, se_mod: bool
) -> Tuple[List[EfficientNetSectionSettings], int]:
# return section settings as well as the out channels as tuple
return (
[
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(32, width_mult),
out_channels=_scale_num_channels(16, width_mult),
kernel_size=3,
expansion_ratio=1,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(16, width_mult),
out_channels=_scale_num_channels(24, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(2, depth_mult),
in_channels=_scale_num_channels(24, width_mult),
out_channels=_scale_num_channels(40, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(40, width_mult),
out_channels=_scale_num_channels(80, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(3, depth_mult),
in_channels=_scale_num_channels(80, width_mult),
out_channels=_scale_num_channels(112, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(4, depth_mult),
in_channels=_scale_num_channels(112, width_mult),
out_channels=_scale_num_channels(192, width_mult),
kernel_size=5,
expansion_ratio=6,
stride=2,
se_ratio=0.25,
se_mod=se_mod,
),
EfficientNetSectionSettings(
num_blocks=_scale_num_blocks(1, depth_mult),
in_channels=_scale_num_channels(192, width_mult),
out_channels=_scale_num_channels(320, width_mult),
kernel_size=3,
expansion_ratio=6,
stride=1,
se_ratio=0.25,
se_mod=se_mod,
),
],
_scale_num_channels(1280, width_mult),
)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b6` function. Write a Python function `def efficientnet_b6( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.5, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B6 implementation; expected input shape is (B, 3, 528, 528) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B6 Module
Here is the function:
def efficientnet_b6(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.5,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B6 implementation; expected input shape is (B, 3, 528, 528)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
:return: The created EfficientNet B6 Module
"""
width_mult = 1.8
depth_mult = 2.6
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
bn_kwargs={"eps": 1.0e-03, "momentum": 0.01},
) | EfficientNet B6 implementation; expected input shape is (B, 3, 528, 528) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B6 Module |
21,296 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNet(Module):
    """
    EfficientNet implementation: a stem conv followed by a configurable
    stack of (optionally fused) inverted-bottleneck sections and a
    classifier head.

    :param sec_settings: the settings for each section in the model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional kwargs forwarded to the BatchNorm2d layers
        (e.g. eps/momentum overrides); None uses the PyTorch defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # BatchNorm2d(**None) would fail, so fall back to an empty dict for
        # the stem; the sub-blocks below receive the original (possibly None)
        # value unchanged.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB (3 channels) to the first
        # section's input channel count, then batch-norm and SiLU.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        # One Sequential of blocks per EfficientNetSectionSettings entry.
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        # Head producing the (logits, classes) pair returned by forward();
        # _Classifier is defined elsewhere in this file.
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Run the stem, all sections, and the classifier head.

        :param inp: input image batch; assumed shape (B, 3, H, W) -- TODO
            confirm expected H/W per model variant
        :return: tuple of (logits, classes) as produced by the classifier
        """
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): no `self`/`cls` parameter -- this is used as a plain
    # function via `EfficientNet.create_section(...)` (see __init__), which
    # works in Python 3; presumably a @staticmethod decorator was dropped.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """
        Build one section as a Sequential of inverted-bottleneck blocks.

        :param settings: the section configuration to realize
        :param bn_kwargs: optional BatchNorm2d kwargs passed to every block
        :return: a Sequential holding ``settings.num_blocks`` blocks
        """
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            # Only the first block changes channel count and applies the
            # section stride; later blocks run at out_channels with stride 1.
            if settings.is_fused:
                # Fused block (EfficientNetV2 style); note it takes no
                # squeeze-excite arguments.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
    # NOTE(review): also lacks `self`/`cls` -- presumably a dropped
    # @staticmethod; the efficientnet_bX factory functions call it as a free
    # name, so confirm whether it should live at module level instead.
    def _create_section_settings(
        width_mult: float, depth_mult: float, se_mod: bool
    ) -> Tuple[List[EfficientNetSectionSettings], int]:
        """
        Build the seven baseline (B0) sections, scaled by the compound-scaling
        width and depth multipliers.

        :param width_mult: channel-count multiplier
        :param depth_mult: block-count multiplier
        :param se_mod: forwarded to every section's se_mod flag
        :return: (section settings list, classifier out channels) tuple
        """
        # return section settings as well as the out channels as tuple
        return (
            [
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(1, depth_mult),
                    in_channels=_scale_num_channels(32, width_mult),
                    out_channels=_scale_num_channels(16, width_mult),
                    kernel_size=3,
                    expansion_ratio=1,
                    stride=1,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(2, depth_mult),
                    in_channels=_scale_num_channels(16, width_mult),
                    out_channels=_scale_num_channels(24, width_mult),
                    kernel_size=3,
                    expansion_ratio=6,
                    stride=2,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(2, depth_mult),
                    in_channels=_scale_num_channels(24, width_mult),
                    out_channels=_scale_num_channels(40, width_mult),
                    kernel_size=5,
                    expansion_ratio=6,
                    stride=2,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(3, depth_mult),
                    in_channels=_scale_num_channels(40, width_mult),
                    out_channels=_scale_num_channels(80, width_mult),
                    kernel_size=3,
                    expansion_ratio=6,
                    stride=2,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(3, depth_mult),
                    in_channels=_scale_num_channels(80, width_mult),
                    out_channels=_scale_num_channels(112, width_mult),
                    kernel_size=5,
                    expansion_ratio=6,
                    stride=1,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(4, depth_mult),
                    in_channels=_scale_num_channels(112, width_mult),
                    out_channels=_scale_num_channels(192, width_mult),
                    kernel_size=5,
                    expansion_ratio=6,
                    stride=2,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
                EfficientNetSectionSettings(
                    num_blocks=_scale_num_blocks(1, depth_mult),
                    in_channels=_scale_num_channels(192, width_mult),
                    out_channels=_scale_num_channels(320, width_mult),
                    kernel_size=3,
                    expansion_ratio=6,
                    stride=1,
                    se_ratio=0.25,
                    se_mod=se_mod,
                ),
            ],
            _scale_num_channels(1280, width_mult),
        )
The provided code snippet includes necessary dependencies for implementing the `efficientnet_b7` function. Write a Python function `def efficientnet_b7( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.5, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNet B7 implementation; expected input shape is (B, 3, 600, 600) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B7 Module
Here is the function:
def efficientnet_b7(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.5,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNet B7 implementation; expected input shape is (B, 3, 600, 600)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1)
    :return: The created EfficientNet B7 Module
    """
    # B7 compound-scaling coefficients: width x2.0, depth x3.1.
    width_mult = 2.0
    depth_mult = 3.1
    # NOTE(review): _create_section_settings is called as a free name here;
    # confirm it is defined at module level and not only inside EfficientNet.
    sec_settings, out_channels = _create_section_settings(
        width_mult, depth_mult, se_mod
    )
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=out_channels,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
        # Non-default batch-norm eps/momentum applied to every BN layer.
        bn_kwargs={"eps": 1.0e-03, "momentum": 0.01},
    ) | EfficientNet B0 implementation; expected input shape is (B, 3, 600, 600) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :param se_mod: If true, moves squeeze-excite to the end of the block (after last 1x1) :return: The created EfficientNet B0 Module |
21,297 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNetSectionSettings(object):
    """
    Configuration describing one section (stage) of an EfficientNet model.

    :param num_blocks: number of blocks stacked in this section
    :param in_channels: number of channels entering the section
    :param out_channels: number of channels produced by the section
    :param kernel_size: kernel size of the depth-wise convolution
    :param expansion_ratio: (in_channels * expansion_ratio) gives the
        input/output channel count of the depth-wise convolution
    :param stride: stride of the depth-wise convolution
    :param se_ratio: (in_channels * se_ratio) gives the squeeze-excite input
        channel count; None leaves squeeze-excite configuration unset
    :param se_mod: if True, move squeeze-excite to the end of the block
        (after the last 1x1 conv)
    :param is_fused: if True, the section uses fused inverted-bottleneck
        blocks (EfficientNetV2 style)
    """
    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        expansion_ratio: int,
        stride: int,
        se_ratio: Optional[float] = None,
        se_mod: bool = False,
        is_fused: bool = False,
    ):
        # Plain data holder: every constructor argument is stored verbatim
        # under the attribute of the same name.
        (
            self.num_blocks,
            self.in_channels,
            self.out_channels,
            self.kernel_size,
        ) = (num_blocks, in_channels, out_channels, kernel_size)
        self.expansion_ratio = expansion_ratio
        self.stride = stride
        self.se_ratio = se_ratio
        self.se_mod = se_mod
        self.is_fused = is_fused
class EfficientNet(Module):
    """
    EfficientNet implementation: a stem conv followed by a configurable
    stack of (optionally fused) inverted-bottleneck sections and a
    classifier head.

    :param sec_settings: the settings for each section in the model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional kwargs forwarded to the BatchNorm2d layers
        (e.g. eps/momentum overrides); None uses the PyTorch defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # BatchNorm2d(**None) would fail, so fall back to an empty dict for
        # the stem; sub-blocks receive the original (possibly None) value.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB to the first section's input
        # channels, then batch-norm and SiLU.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        # One Sequential of blocks per EfficientNetSectionSettings entry.
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        # Head producing the (logits, classes) pair returned by forward().
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Run the stem, all sections, and the classifier head.

        :param inp: input image batch; assumed shape (B, 3, H, W) -- TODO
            confirm expected H/W per model variant
        :return: tuple of (logits, classes) as produced by the classifier
        """
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): no `self`/`cls` parameter -- used as a plain function via
    # `EfficientNet.create_section(...)` (see __init__), which works in
    # Python 3; presumably a @staticmethod decorator was dropped.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """
        Build one section as a Sequential of inverted-bottleneck blocks.

        :param settings: the section configuration to realize
        :param bn_kwargs: optional BatchNorm2d kwargs passed to every block
        :return: a Sequential holding ``settings.num_blocks`` blocks
        """
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            # Only the first block changes channel count and applies the
            # section stride; later blocks run at out_channels with stride 1.
            if settings.is_fused:
                # Fused block (EfficientNetV2 style); takes no
                # squeeze-excite arguments.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_v2_s` function. Write a Python function `def efficientnet_v2_s( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.2, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNetV2-s implementation; expected input shape is (B, 3, 384, 384) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-S Module
Here is the function:
def efficientnet_v2_s(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.2,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNetV2-s implementation; expected input shape is (B, 3, 384, 384)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1); only affects the non-fused sections below
    :return: The created EfficientNet_V2-S Module
    """
    # V2 layout: early stages use fused blocks (is_fused=True, no
    # squeeze-excite), later stages use regular squeeze-excite MBConv.
    sec_settings = [
        # stage 1: 2 x fused, 24 -> 24
        EfficientNetSectionSettings(
            num_blocks=2,
            in_channels=24,
            out_channels=24,
            kernel_size=3,
            expansion_ratio=1,
            stride=1,
            is_fused=True,
        ),
        # stage 2: 4 x fused, 24 -> 48, downsample
        EfficientNetSectionSettings(
            num_blocks=4,
            in_channels=24,
            out_channels=48,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 3: 4 x fused, 48 -> 64, downsample
        EfficientNetSectionSettings(
            num_blocks=4,
            in_channels=48,
            out_channels=64,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 4: 6 x MBConv+SE, 64 -> 128, downsample
        EfficientNetSectionSettings(
            num_blocks=6,
            in_channels=64,
            out_channels=128,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 5: 9 x MBConv+SE, 128 -> 160
        EfficientNetSectionSettings(
            num_blocks=9,
            in_channels=128,
            out_channels=160,
            kernel_size=3,
            expansion_ratio=6,
            stride=1,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 6: 15 x MBConv+SE, 160 -> 256, downsample
        EfficientNetSectionSettings(
            num_blocks=15,
            in_channels=160,
            out_channels=256,
            kernel_size=3,
            expansion_ratio=6,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
    ]
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=1280,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
        # Non-default batch-norm eps applied to every BN layer.
        bn_kwargs={"eps": 1.0e-03},
    ) | EfficientNetV2-s implementation; expected input shape is (B, 3, 384, 384) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-S Module |
21,298 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNetSectionSettings(object):
    """
    Configuration describing one section (stage) of an EfficientNet model.

    :param num_blocks: number of blocks stacked in this section
    :param in_channels: number of channels entering the section
    :param out_channels: number of channels produced by the section
    :param kernel_size: kernel size of the depth-wise convolution
    :param expansion_ratio: (in_channels * expansion_ratio) gives the
        input/output channel count of the depth-wise convolution
    :param stride: stride of the depth-wise convolution
    :param se_ratio: (in_channels * se_ratio) gives the squeeze-excite input
        channel count; None leaves squeeze-excite configuration unset
    :param se_mod: if True, move squeeze-excite to the end of the block
        (after the last 1x1 conv)
    :param is_fused: if True, the section uses fused inverted-bottleneck
        blocks (EfficientNetV2 style)
    """
    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        expansion_ratio: int,
        stride: int,
        se_ratio: Optional[float] = None,
        se_mod: bool = False,
        is_fused: bool = False,
    ):
        # Plain data holder: every constructor argument is stored verbatim
        # under the attribute of the same name.
        (
            self.num_blocks,
            self.in_channels,
            self.out_channels,
            self.kernel_size,
        ) = (num_blocks, in_channels, out_channels, kernel_size)
        self.expansion_ratio = expansion_ratio
        self.stride = stride
        self.se_ratio = se_ratio
        self.se_mod = se_mod
        self.is_fused = is_fused
class EfficientNet(Module):
    """
    EfficientNet implementation: a stem conv followed by a configurable
    stack of (optionally fused) inverted-bottleneck sections and a
    classifier head.

    :param sec_settings: the settings for each section in the model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional kwargs forwarded to the BatchNorm2d layers
        (e.g. eps/momentum overrides); None uses the PyTorch defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # BatchNorm2d(**None) would fail, so fall back to an empty dict for
        # the stem; sub-blocks receive the original (possibly None) value.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB to the first section's input
        # channels, then batch-norm and SiLU.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        # One Sequential of blocks per EfficientNetSectionSettings entry.
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        # Head producing the (logits, classes) pair returned by forward().
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Run the stem, all sections, and the classifier head.

        :param inp: input image batch; assumed shape (B, 3, H, W) -- TODO
            confirm expected H/W per model variant
        :return: tuple of (logits, classes) as produced by the classifier
        """
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): no `self`/`cls` parameter -- used as a plain function via
    # `EfficientNet.create_section(...)` (see __init__), which works in
    # Python 3; presumably a @staticmethod decorator was dropped.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """
        Build one section as a Sequential of inverted-bottleneck blocks.

        :param settings: the section configuration to realize
        :param bn_kwargs: optional BatchNorm2d kwargs passed to every block
        :return: a Sequential holding ``settings.num_blocks`` blocks
        """
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            # Only the first block changes channel count and applies the
            # section stride; later blocks run at out_channels with stride 1.
            if settings.is_fused:
                # Fused block (EfficientNetV2 style); takes no
                # squeeze-excite arguments.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_v2_m` function. Write a Python function `def efficientnet_v2_m( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.3, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNetV2-m implementation; expected input shape is (B, 3, 480, 480) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-M Module
Here is the function:
def efficientnet_v2_m(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.3,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNetV2-m implementation; expected input shape is (B, 3, 480, 480)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1); only affects the non-fused sections below
    :return: The created EfficientNet_V2-M Module
    """
    # V2 layout: stages 1-3 are fused blocks (no squeeze-excite), stages 4-7
    # are regular squeeze-excite MBConv blocks.
    sec_settings = [
        # stage 1: 3 x fused, 24 -> 24
        EfficientNetSectionSettings(
            num_blocks=3,
            in_channels=24,
            out_channels=24,
            kernel_size=3,
            expansion_ratio=1,
            stride=1,
            is_fused=True,
        ),
        # stage 2: 5 x fused, 24 -> 48, downsample
        EfficientNetSectionSettings(
            num_blocks=5,
            in_channels=24,
            out_channels=48,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 3: 5 x fused, 48 -> 80, downsample
        EfficientNetSectionSettings(
            num_blocks=5,
            in_channels=48,
            out_channels=80,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 4: 7 x MBConv+SE, 80 -> 160, downsample
        EfficientNetSectionSettings(
            num_blocks=7,
            in_channels=80,
            out_channels=160,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 5: 14 x MBConv+SE, 160 -> 176
        EfficientNetSectionSettings(
            num_blocks=14,
            in_channels=160,
            out_channels=176,
            kernel_size=3,
            expansion_ratio=6,
            stride=1,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 6: 18 x MBConv+SE, 176 -> 304, downsample
        EfficientNetSectionSettings(
            num_blocks=18,
            in_channels=176,
            out_channels=304,
            kernel_size=3,
            expansion_ratio=6,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 7: 5 x MBConv+SE, 304 -> 512
        EfficientNetSectionSettings(
            num_blocks=5,
            in_channels=304,
            out_channels=512,
            kernel_size=3,
            expansion_ratio=6,
            stride=1,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
    ]
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=1280,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
        # Non-default batch-norm eps applied to every BN layer.
        bn_kwargs={"eps": 1.0e-03},
    ) | EfficientNetV2-m implementation; expected input shape is (B, 3, 480, 480) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-M Module |
21,299 | import math
from collections import OrderedDict
from typing import Any, List, Mapping, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
Sequential,
Sigmoid,
SiLU,
Softmax,
)
from sparseml.pytorch.models.registry import ModelRegistry
from sparseml.pytorch.nn import SqueezeExcite
class EfficientNetSectionSettings(object):
    """
    Configuration describing one section (stage) of an EfficientNet model.

    :param num_blocks: number of blocks stacked in this section
    :param in_channels: number of channels entering the section
    :param out_channels: number of channels produced by the section
    :param kernel_size: kernel size of the depth-wise convolution
    :param expansion_ratio: (in_channels * expansion_ratio) gives the
        input/output channel count of the depth-wise convolution
    :param stride: stride of the depth-wise convolution
    :param se_ratio: (in_channels * se_ratio) gives the squeeze-excite input
        channel count; None leaves squeeze-excite configuration unset
    :param se_mod: if True, move squeeze-excite to the end of the block
        (after the last 1x1 conv)
    :param is_fused: if True, the section uses fused inverted-bottleneck
        blocks (EfficientNetV2 style)
    """
    def __init__(
        self,
        num_blocks: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        expansion_ratio: int,
        stride: int,
        se_ratio: Optional[float] = None,
        se_mod: bool = False,
        is_fused: bool = False,
    ):
        # Plain data holder: every constructor argument is stored verbatim
        # under the attribute of the same name.
        (
            self.num_blocks,
            self.in_channels,
            self.out_channels,
            self.kernel_size,
        ) = (num_blocks, in_channels, out_channels, kernel_size)
        self.expansion_ratio = expansion_ratio
        self.stride = stride
        self.se_ratio = se_ratio
        self.se_mod = se_mod
        self.is_fused = is_fused
class EfficientNet(Module):
    """
    EfficientNet implementation: a stem conv followed by a configurable
    stack of (optionally fused) inverted-bottleneck sections and a
    classifier head.

    :param sec_settings: the settings for each section in the model
    :param out_channels: the number of output channels in the classifier before the fc
    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param bn_kwargs: optional kwargs forwarded to the BatchNorm2d layers
        (e.g. eps/momentum overrides); None uses the PyTorch defaults
    """
    def __init__(
        self,
        sec_settings: List[EfficientNetSectionSettings],
        out_channels: int,
        num_classes: int,
        class_type: str,
        dropout: float,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        super().__init__()
        # BatchNorm2d(**None) would fail, so fall back to an empty dict for
        # the stem; sub-blocks receive the original (possibly None) value.
        _bn_kwargs = bn_kwargs or {}
        # Stem: 3x3 stride-2 conv from RGB to the first section's input
        # channels, then batch-norm and SiLU.
        self.input = Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        Conv2d(
                            in_channels=3,
                            out_channels=sec_settings[0].in_channels,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (
                        "bn",
                        BatchNorm2d(
                            num_features=sec_settings[0].in_channels, **_bn_kwargs
                        ),
                    ),
                    ("act", SiLU()),
                ]
            )
        )
        # One Sequential of blocks per EfficientNetSectionSettings entry.
        self.sections = Sequential(
            *[
                EfficientNet.create_section(settings, bn_kwargs)
                for settings in sec_settings
            ]
        )
        # Head producing the (logits, classes) pair returned by forward().
        self.classifier = _Classifier(
            in_channels=sec_settings[-1].out_channels,
            out_channels=out_channels,
            classes=num_classes,
            dropout=dropout,
            class_type=class_type,
            bn_kwargs=bn_kwargs,
        )
    def forward(self, inp: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Run the stem, all sections, and the classifier head.

        :param inp: input image batch; assumed shape (B, 3, H, W) -- TODO
            confirm expected H/W per model variant
        :return: tuple of (logits, classes) as produced by the classifier
        """
        feat = self.input(inp)
        feat = self.sections(feat)
        logits, classes = self.classifier(feat)
        return logits, classes
    # NOTE(review): no `self`/`cls` parameter -- used as a plain function via
    # `EfficientNet.create_section(...)` (see __init__), which works in
    # Python 3; presumably a @staticmethod decorator was dropped.
    def create_section(
        settings: EfficientNetSectionSettings,
        bn_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> Sequential:
        """
        Build one section as a Sequential of inverted-bottleneck blocks.

        :param settings: the section configuration to realize
        :param bn_kwargs: optional BatchNorm2d kwargs passed to every block
        :return: a Sequential holding ``settings.num_blocks`` blocks
        """
        assert settings.num_blocks > 0
        in_channels = settings.in_channels
        stride = settings.stride
        blocks = []
        for block in range(settings.num_blocks):
            # Only the first block changes channel count and applies the
            # section stride; later blocks run at out_channels with stride 1.
            if settings.is_fused:
                # Fused block (EfficientNetV2 style); takes no
                # squeeze-excite arguments.
                blocks.append(
                    _FusedInvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        bn_kwargs=bn_kwargs,
                    )
                )
            else:
                blocks.append(
                    _InvertedBottleneckBlock(
                        in_channels=in_channels
                        if block == 0
                        else settings.out_channels,
                        out_channels=settings.out_channels,
                        kernel_size=settings.kernel_size,
                        expansion_ratio=settings.expansion_ratio,
                        stride=stride if block == 0 else 1,
                        se_ratio=settings.se_ratio,
                        se_mod=settings.se_mod,
                        bn_kwargs=bn_kwargs,
                    )
                )
        return Sequential(*blocks)
The provided code snippet includes necessary dependencies for implementing the `efficientnet_v2_l` function. Write a Python function `def efficientnet_v2_l( num_classes: int = 1000, class_type: str = "single", dropout: float = 0.4, se_mod: bool = False, ) -> EfficientNet` to solve the following problem:
EfficientNetV2-l implementation; expected input shape is (B, 3, 480, 480) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-L Module
Here is the function:
def efficientnet_v2_l(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.4,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNetV2-l implementation; expected input shape is (B, 3, 480, 480)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1); only affects the non-fused sections below
    :return: The created EfficientNet_V2-L Module
    """
    # V2 layout: stages 1-3 are fused blocks (no squeeze-excite), stages 4-7
    # are regular squeeze-excite MBConv blocks.
    sec_settings = [
        # stage 1: 4 x fused, 32 -> 32
        EfficientNetSectionSettings(
            num_blocks=4,
            in_channels=32,
            out_channels=32,
            kernel_size=3,
            expansion_ratio=1,
            stride=1,
            is_fused=True,
        ),
        # stage 2: 7 x fused, 32 -> 64, downsample
        EfficientNetSectionSettings(
            num_blocks=7,
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 3: 7 x fused, 64 -> 96, downsample
        EfficientNetSectionSettings(
            num_blocks=7,
            in_channels=64,
            out_channels=96,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            is_fused=True,
        ),
        # stage 4: 10 x MBConv+SE, 96 -> 192, downsample
        EfficientNetSectionSettings(
            num_blocks=10,
            in_channels=96,
            out_channels=192,
            kernel_size=3,
            expansion_ratio=4,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 5: 19 x MBConv+SE, 192 -> 224
        EfficientNetSectionSettings(
            num_blocks=19,
            in_channels=192,
            out_channels=224,
            kernel_size=3,
            expansion_ratio=6,
            stride=1,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 6: 25 x MBConv+SE, 224 -> 384, downsample
        EfficientNetSectionSettings(
            num_blocks=25,
            in_channels=224,
            out_channels=384,
            kernel_size=3,
            expansion_ratio=6,
            stride=2,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
        # stage 7: 7 x MBConv+SE, 384 -> 640
        EfficientNetSectionSettings(
            num_blocks=7,
            in_channels=384,
            out_channels=640,
            kernel_size=3,
            expansion_ratio=6,
            stride=1,
            se_ratio=0.25,
            se_mod=se_mod,
        ),
    ]
    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=1280,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
        # Non-default batch-norm eps applied to every BN layer.
        bn_kwargs={"eps": 1.0e-03},
    ) | EfficientNetV2-l implementation; expected input shape is (B, 3, 480, 480) :param num_classes: the number of classes to classify :param class_type: one of [single, multi] to support multi class training; default single :param dropout: the amount of dropout to use while training :return: The created EfficientNet_V2-L Module |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.